debugdeltachain: distinguish between snapshot and "other" diffs...
marmoute -
r50113:5b1495c3 default
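With this change, `hg debugdeltachain` reports intermediate snapshot revisions with their own `snap` delta type: a revision whose delta base is neither of its parents nor the revision itself, but which `issnapshot()` identifies as a snapshot, is no longer lumped in with `prev` or `other`. As a hypothetical way to inspect the new classification (the `-T` template support and the `deltatype` keyword are documented in the command's docstring below; actual values depend on the repository):

    hg debugdeltachain -m -T '{rev} {chainid} {chainlen} {deltatype}\n'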
@@ -1,4930 +1,4932 @@
1 # debugcommands.py - command processing for debug* commands
1 # debugcommands.py - command processing for debug* commands
2 #
2 #
3 # Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import binascii
9 import binascii
10 import codecs
10 import codecs
11 import collections
11 import collections
12 import contextlib
12 import contextlib
13 import difflib
13 import difflib
14 import errno
14 import errno
15 import glob
15 import glob
16 import operator
16 import operator
17 import os
17 import os
18 import platform
18 import platform
19 import random
19 import random
20 import re
20 import re
21 import socket
21 import socket
22 import ssl
22 import ssl
23 import stat
23 import stat
24 import string
24 import string
25 import subprocess
25 import subprocess
26 import sys
26 import sys
27 import time
27 import time
28
28
29 from .i18n import _
29 from .i18n import _
30 from .node import (
30 from .node import (
31 bin,
31 bin,
32 hex,
32 hex,
33 nullrev,
33 nullrev,
34 short,
34 short,
35 )
35 )
36 from .pycompat import (
36 from .pycompat import (
37 getattr,
37 getattr,
38 open,
38 open,
39 )
39 )
40 from . import (
40 from . import (
41 bundle2,
41 bundle2,
42 bundlerepo,
42 bundlerepo,
43 changegroup,
43 changegroup,
44 cmdutil,
44 cmdutil,
45 color,
45 color,
46 context,
46 context,
47 copies,
47 copies,
48 dagparser,
48 dagparser,
49 dirstateutils,
49 dirstateutils,
50 encoding,
50 encoding,
51 error,
51 error,
52 exchange,
52 exchange,
53 extensions,
53 extensions,
54 filemerge,
54 filemerge,
55 filesetlang,
55 filesetlang,
56 formatter,
56 formatter,
57 hg,
57 hg,
58 httppeer,
58 httppeer,
59 localrepo,
59 localrepo,
60 lock as lockmod,
60 lock as lockmod,
61 logcmdutil,
61 logcmdutil,
62 mergestate as mergestatemod,
62 mergestate as mergestatemod,
63 metadata,
63 metadata,
64 obsolete,
64 obsolete,
65 obsutil,
65 obsutil,
66 pathutil,
66 pathutil,
67 phases,
67 phases,
68 policy,
68 policy,
69 pvec,
69 pvec,
70 pycompat,
70 pycompat,
71 registrar,
71 registrar,
72 repair,
72 repair,
73 repoview,
73 repoview,
74 requirements,
74 requirements,
75 revlog,
75 revlog,
76 revset,
76 revset,
77 revsetlang,
77 revsetlang,
78 scmutil,
78 scmutil,
79 setdiscovery,
79 setdiscovery,
80 simplemerge,
80 simplemerge,
81 sshpeer,
81 sshpeer,
82 sslutil,
82 sslutil,
83 streamclone,
83 streamclone,
84 strip,
84 strip,
85 tags as tagsmod,
85 tags as tagsmod,
86 templater,
86 templater,
87 treediscovery,
87 treediscovery,
88 upgrade,
88 upgrade,
89 url as urlmod,
89 url as urlmod,
90 util,
90 util,
91 vfs as vfsmod,
91 vfs as vfsmod,
92 wireprotoframing,
92 wireprotoframing,
93 wireprotoserver,
93 wireprotoserver,
94 )
94 )
95 from .interfaces import repository
95 from .interfaces import repository
96 from .utils import (
96 from .utils import (
97 cborutil,
97 cborutil,
98 compression,
98 compression,
99 dateutil,
99 dateutil,
100 procutil,
100 procutil,
101 stringutil,
101 stringutil,
102 urlutil,
102 urlutil,
103 )
103 )
104
104
105 from .revlogutils import (
105 from .revlogutils import (
106 deltas as deltautil,
106 deltas as deltautil,
107 nodemap,
107 nodemap,
108 rewrite,
108 rewrite,
109 sidedata,
109 sidedata,
110 )
110 )
111
111
112 release = lockmod.release
112 release = lockmod.release
113
113
114 table = {}
114 table = {}
115 table.update(strip.command._table)
115 table.update(strip.command._table)
116 command = registrar.command(table)
116 command = registrar.command(table)
117
117
118
118
119 @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
119 @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
120 def debugancestor(ui, repo, *args):
120 def debugancestor(ui, repo, *args):
121 """find the ancestor revision of two revisions in a given index"""
121 """find the ancestor revision of two revisions in a given index"""
122 if len(args) == 3:
122 if len(args) == 3:
123 index, rev1, rev2 = args
123 index, rev1, rev2 = args
124 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
124 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
125 lookup = r.lookup
125 lookup = r.lookup
126 elif len(args) == 2:
126 elif len(args) == 2:
127 if not repo:
127 if not repo:
128 raise error.Abort(
128 raise error.Abort(
129 _(b'there is no Mercurial repository here (.hg not found)')
129 _(b'there is no Mercurial repository here (.hg not found)')
130 )
130 )
131 rev1, rev2 = args
131 rev1, rev2 = args
132 r = repo.changelog
132 r = repo.changelog
133 lookup = repo.lookup
133 lookup = repo.lookup
134 else:
134 else:
135 raise error.Abort(_(b'either two or three arguments required'))
135 raise error.Abort(_(b'either two or three arguments required'))
136 a = r.ancestor(lookup(rev1), lookup(rev2))
136 a = r.ancestor(lookup(rev1), lookup(rev2))
137 ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
137 ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
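Hypothetical invocations of the two forms accepted here (the revision arguments and the `.hg/store/00changelog.i` index path are placeholders, not values tied to this changeset):

    hg debugancestor REV1 REV2
    hg debugancestor .hg/store/00changelog.i REV1 REV2

Either form prints the resolved ancestor as `rev:node`, per the `ui.write` call above.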
138
138
139
139
140 @command(b'debugantivirusrunning', [])
140 @command(b'debugantivirusrunning', [])
141 def debugantivirusrunning(ui, repo):
141 def debugantivirusrunning(ui, repo):
142 """attempt to trigger an antivirus scanner to see if one is active"""
142 """attempt to trigger an antivirus scanner to see if one is active"""
143 with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
143 with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
144 f.write(
144 f.write(
145 util.b85decode(
145 util.b85decode(
146 # This is a base85-armored version of the EICAR test file. See
146 # This is a base85-armored version of the EICAR test file. See
147 # https://en.wikipedia.org/wiki/EICAR_test_file for details.
147 # https://en.wikipedia.org/wiki/EICAR_test_file for details.
148 b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
148 b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
149 b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
149 b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
150 )
150 )
151 )
151 )
152 # Give an AV engine time to scan the file.
152 # Give an AV engine time to scan the file.
153 time.sleep(2)
153 time.sleep(2)
154 util.unlink(repo.cachevfs.join('eicar-test-file.com'))
154 util.unlink(repo.cachevfs.join('eicar-test-file.com'))
155
155
156
156
157 @command(b'debugapplystreamclonebundle', [], b'FILE')
157 @command(b'debugapplystreamclonebundle', [], b'FILE')
158 def debugapplystreamclonebundle(ui, repo, fname):
158 def debugapplystreamclonebundle(ui, repo, fname):
159 """apply a stream clone bundle file"""
159 """apply a stream clone bundle file"""
160 f = hg.openpath(ui, fname)
160 f = hg.openpath(ui, fname)
161 gen = exchange.readbundle(ui, f, fname)
161 gen = exchange.readbundle(ui, f, fname)
162 gen.apply(repo)
162 gen.apply(repo)
163
163
164
164
165 @command(
165 @command(
166 b'debugbuilddag',
166 b'debugbuilddag',
167 [
167 [
168 (
168 (
169 b'm',
169 b'm',
170 b'mergeable-file',
170 b'mergeable-file',
171 None,
171 None,
172 _(b'add single file mergeable changes'),
172 _(b'add single file mergeable changes'),
173 ),
173 ),
174 (
174 (
175 b'o',
175 b'o',
176 b'overwritten-file',
176 b'overwritten-file',
177 None,
177 None,
178 _(b'add single file all revs overwrite'),
178 _(b'add single file all revs overwrite'),
179 ),
179 ),
180 (b'n', b'new-file', None, _(b'add new file at each rev')),
180 (b'n', b'new-file', None, _(b'add new file at each rev')),
181 (
181 (
182 b'',
182 b'',
183 b'from-existing',
183 b'from-existing',
184 None,
184 None,
185 _(b'continue from a non-empty repository'),
185 _(b'continue from a non-empty repository'),
186 ),
186 ),
187 ],
187 ],
188 _(b'[OPTION]... [TEXT]'),
188 _(b'[OPTION]... [TEXT]'),
189 )
189 )
190 def debugbuilddag(
190 def debugbuilddag(
191 ui,
191 ui,
192 repo,
192 repo,
193 text=None,
193 text=None,
194 mergeable_file=False,
194 mergeable_file=False,
195 overwritten_file=False,
195 overwritten_file=False,
196 new_file=False,
196 new_file=False,
197 from_existing=False,
197 from_existing=False,
198 ):
198 ):
199 """builds a repo with a given DAG from scratch in the current empty repo
199 """builds a repo with a given DAG from scratch in the current empty repo
200
200
201 The description of the DAG is read from stdin if not given on the
201 The description of the DAG is read from stdin if not given on the
202 command line.
202 command line.
203
203
204 Elements:
204 Elements:
205
205
206 - "+n" is a linear run of n nodes based on the current default parent
206 - "+n" is a linear run of n nodes based on the current default parent
207 - "." is a single node based on the current default parent
207 - "." is a single node based on the current default parent
208 - "$" resets the default parent to null (implied at the start);
208 - "$" resets the default parent to null (implied at the start);
209 otherwise the default parent is always the last node created
209 otherwise the default parent is always the last node created
210 - "<p" sets the default parent to the backref p
210 - "<p" sets the default parent to the backref p
211 - "*p" is a fork at parent p, which is a backref
211 - "*p" is a fork at parent p, which is a backref
212 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
212 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
213 - "/p2" is a merge of the preceding node and p2
213 - "/p2" is a merge of the preceding node and p2
214 - ":tag" defines a local tag for the preceding node
214 - ":tag" defines a local tag for the preceding node
215 - "@branch" sets the named branch for subsequent nodes
215 - "@branch" sets the named branch for subsequent nodes
216 - "#...\\n" is a comment up to the end of the line
216 - "#...\\n" is a comment up to the end of the line
217
217
218 Whitespace between the above elements is ignored.
218 Whitespace between the above elements is ignored.
219
219
220 A backref is either
220 A backref is either
221
221
222 - a number n, which references the node curr-n, where curr is the current
222 - a number n, which references the node curr-n, where curr is the current
223 node, or
223 node, or
224 - the name of a local tag you placed earlier using ":tag", or
224 - the name of a local tag you placed earlier using ":tag", or
225 - empty to denote the default parent.
225 - empty to denote the default parent.
226
226
227 All string-valued elements are either strictly alphanumeric, or must
227 All string-valued elements are either strictly alphanumeric, or must
228 be enclosed in double quotes ("..."), with "\\" as escape character.
228 be enclosed in double quotes ("..."), with "\\" as escape character.
229 """
229 """
230
230
231 if text is None:
231 if text is None:
232 ui.status(_(b"reading DAG from stdin\n"))
232 ui.status(_(b"reading DAG from stdin\n"))
233 text = ui.fin.read()
233 text = ui.fin.read()
234
234
235 cl = repo.changelog
235 cl = repo.changelog
236 if len(cl) > 0 and not from_existing:
236 if len(cl) > 0 and not from_existing:
237 raise error.Abort(_(b'repository is not empty'))
237 raise error.Abort(_(b'repository is not empty'))
238
238
239 # determine number of revs in DAG
239 # determine number of revs in DAG
240 total = 0
240 total = 0
241 for type, data in dagparser.parsedag(text):
241 for type, data in dagparser.parsedag(text):
242 if type == b'n':
242 if type == b'n':
243 total += 1
243 total += 1
244
244
245 if mergeable_file:
245 if mergeable_file:
246 linesperrev = 2
246 linesperrev = 2
247 # make a file with k lines per rev
247 # make a file with k lines per rev
248 initialmergedlines = [
248 initialmergedlines = [
249 b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
249 b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
250 ]
250 ]
251 initialmergedlines.append(b"")
251 initialmergedlines.append(b"")
252
252
253 tags = []
253 tags = []
254 progress = ui.makeprogress(
254 progress = ui.makeprogress(
255 _(b'building'), unit=_(b'revisions'), total=total
255 _(b'building'), unit=_(b'revisions'), total=total
256 )
256 )
257 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
257 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
258 at = -1
258 at = -1
259 atbranch = b'default'
259 atbranch = b'default'
260 nodeids = []
260 nodeids = []
261 id = 0
261 id = 0
262 progress.update(id)
262 progress.update(id)
263 for type, data in dagparser.parsedag(text):
263 for type, data in dagparser.parsedag(text):
264 if type == b'n':
264 if type == b'n':
265 ui.note((b'node %s\n' % pycompat.bytestr(data)))
265 ui.note((b'node %s\n' % pycompat.bytestr(data)))
266 id, ps = data
266 id, ps = data
267
267
268 files = []
268 files = []
269 filecontent = {}
269 filecontent = {}
270
270
271 p2 = None
271 p2 = None
272 if mergeable_file:
272 if mergeable_file:
273 fn = b"mf"
273 fn = b"mf"
274 p1 = repo[ps[0]]
274 p1 = repo[ps[0]]
275 if len(ps) > 1:
275 if len(ps) > 1:
276 p2 = repo[ps[1]]
276 p2 = repo[ps[1]]
277 pa = p1.ancestor(p2)
277 pa = p1.ancestor(p2)
278 base, local, other = [
278 base, local, other = [
279 x[fn].data() for x in (pa, p1, p2)
279 x[fn].data() for x in (pa, p1, p2)
280 ]
280 ]
281 m3 = simplemerge.Merge3Text(base, local, other)
281 m3 = simplemerge.Merge3Text(base, local, other)
282 ml = [
282 ml = [
283 l.strip()
283 l.strip()
284 for l in simplemerge.render_minimized(m3)[0]
284 for l in simplemerge.render_minimized(m3)[0]
285 ]
285 ]
286 ml.append(b"")
286 ml.append(b"")
287 elif at > 0:
287 elif at > 0:
288 ml = p1[fn].data().split(b"\n")
288 ml = p1[fn].data().split(b"\n")
289 else:
289 else:
290 ml = initialmergedlines
290 ml = initialmergedlines
291 ml[id * linesperrev] += b" r%i" % id
291 ml[id * linesperrev] += b" r%i" % id
292 mergedtext = b"\n".join(ml)
292 mergedtext = b"\n".join(ml)
293 files.append(fn)
293 files.append(fn)
294 filecontent[fn] = mergedtext
294 filecontent[fn] = mergedtext
295
295
296 if overwritten_file:
296 if overwritten_file:
297 fn = b"of"
297 fn = b"of"
298 files.append(fn)
298 files.append(fn)
299 filecontent[fn] = b"r%i\n" % id
299 filecontent[fn] = b"r%i\n" % id
300
300
301 if new_file:
301 if new_file:
302 fn = b"nf%i" % id
302 fn = b"nf%i" % id
303 files.append(fn)
303 files.append(fn)
304 filecontent[fn] = b"r%i\n" % id
304 filecontent[fn] = b"r%i\n" % id
305 if len(ps) > 1:
305 if len(ps) > 1:
306 if not p2:
306 if not p2:
307 p2 = repo[ps[1]]
307 p2 = repo[ps[1]]
308 for fn in p2:
308 for fn in p2:
309 if fn.startswith(b"nf"):
309 if fn.startswith(b"nf"):
310 files.append(fn)
310 files.append(fn)
311 filecontent[fn] = p2[fn].data()
311 filecontent[fn] = p2[fn].data()
312
312
313 def fctxfn(repo, cx, path):
313 def fctxfn(repo, cx, path):
314 if path in filecontent:
314 if path in filecontent:
315 return context.memfilectx(
315 return context.memfilectx(
316 repo, cx, path, filecontent[path]
316 repo, cx, path, filecontent[path]
317 )
317 )
318 return None
318 return None
319
319
320 if len(ps) == 0 or ps[0] < 0:
320 if len(ps) == 0 or ps[0] < 0:
321 pars = [None, None]
321 pars = [None, None]
322 elif len(ps) == 1:
322 elif len(ps) == 1:
323 pars = [nodeids[ps[0]], None]
323 pars = [nodeids[ps[0]], None]
324 else:
324 else:
325 pars = [nodeids[p] for p in ps]
325 pars = [nodeids[p] for p in ps]
326 cx = context.memctx(
326 cx = context.memctx(
327 repo,
327 repo,
328 pars,
328 pars,
329 b"r%i" % id,
329 b"r%i" % id,
330 files,
330 files,
331 fctxfn,
331 fctxfn,
332 date=(id, 0),
332 date=(id, 0),
333 user=b"debugbuilddag",
333 user=b"debugbuilddag",
334 extra={b'branch': atbranch},
334 extra={b'branch': atbranch},
335 )
335 )
336 nodeid = repo.commitctx(cx)
336 nodeid = repo.commitctx(cx)
337 nodeids.append(nodeid)
337 nodeids.append(nodeid)
338 at = id
338 at = id
339 elif type == b'l':
339 elif type == b'l':
340 id, name = data
340 id, name = data
341 ui.note((b'tag %s\n' % name))
341 ui.note((b'tag %s\n' % name))
342 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
342 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
343 elif type == b'a':
343 elif type == b'a':
344 ui.note((b'branch %s\n' % data))
344 ui.note((b'branch %s\n' % data))
345 atbranch = data
345 atbranch = data
346 progress.update(id)
346 progress.update(id)
347
347
348 if tags:
348 if tags:
349 repo.vfs.write(b"localtags", b"".join(tags))
349 repo.vfs.write(b"localtags", b"".join(tags))
350
350
351
351
352 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
352 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
353 indent_string = b' ' * indent
353 indent_string = b' ' * indent
354 if all:
354 if all:
355 ui.writenoi18n(
355 ui.writenoi18n(
356 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
356 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
357 % indent_string
357 % indent_string
358 )
358 )
359
359
360 def showchunks(named):
360 def showchunks(named):
361 ui.write(b"\n%s%s\n" % (indent_string, named))
361 ui.write(b"\n%s%s\n" % (indent_string, named))
362 for deltadata in gen.deltaiter():
362 for deltadata in gen.deltaiter():
363 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
363 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
364 ui.write(
364 ui.write(
365 b"%s%s %s %s %s %s %d\n"
365 b"%s%s %s %s %s %s %d\n"
366 % (
366 % (
367 indent_string,
367 indent_string,
368 hex(node),
368 hex(node),
369 hex(p1),
369 hex(p1),
370 hex(p2),
370 hex(p2),
371 hex(cs),
371 hex(cs),
372 hex(deltabase),
372 hex(deltabase),
373 len(delta),
373 len(delta),
374 )
374 )
375 )
375 )
376
376
377 gen.changelogheader()
377 gen.changelogheader()
378 showchunks(b"changelog")
378 showchunks(b"changelog")
379 gen.manifestheader()
379 gen.manifestheader()
380 showchunks(b"manifest")
380 showchunks(b"manifest")
381 for chunkdata in iter(gen.filelogheader, {}):
381 for chunkdata in iter(gen.filelogheader, {}):
382 fname = chunkdata[b'filename']
382 fname = chunkdata[b'filename']
383 showchunks(fname)
383 showchunks(fname)
384 else:
384 else:
385 if isinstance(gen, bundle2.unbundle20):
385 if isinstance(gen, bundle2.unbundle20):
386 raise error.Abort(_(b'use debugbundle2 for this file'))
386 raise error.Abort(_(b'use debugbundle2 for this file'))
387 gen.changelogheader()
387 gen.changelogheader()
388 for deltadata in gen.deltaiter():
388 for deltadata in gen.deltaiter():
389 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
389 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
390 ui.write(b"%s%s\n" % (indent_string, hex(node)))
390 ui.write(b"%s%s\n" % (indent_string, hex(node)))
391
391
392
392
393 def _debugobsmarkers(ui, part, indent=0, **opts):
393 def _debugobsmarkers(ui, part, indent=0, **opts):
394 """display version and markers contained in 'data'"""
394 """display version and markers contained in 'data'"""
395 opts = pycompat.byteskwargs(opts)
395 opts = pycompat.byteskwargs(opts)
396 data = part.read()
396 data = part.read()
397 indent_string = b' ' * indent
397 indent_string = b' ' * indent
398 try:
398 try:
399 version, markers = obsolete._readmarkers(data)
399 version, markers = obsolete._readmarkers(data)
400 except error.UnknownVersion as exc:
400 except error.UnknownVersion as exc:
401 msg = b"%sunsupported version: %s (%d bytes)\n"
401 msg = b"%sunsupported version: %s (%d bytes)\n"
402 msg %= indent_string, exc.version, len(data)
402 msg %= indent_string, exc.version, len(data)
403 ui.write(msg)
403 ui.write(msg)
404 else:
404 else:
405 msg = b"%sversion: %d (%d bytes)\n"
405 msg = b"%sversion: %d (%d bytes)\n"
406 msg %= indent_string, version, len(data)
406 msg %= indent_string, version, len(data)
407 ui.write(msg)
407 ui.write(msg)
408 fm = ui.formatter(b'debugobsolete', opts)
408 fm = ui.formatter(b'debugobsolete', opts)
409 for rawmarker in sorted(markers):
409 for rawmarker in sorted(markers):
410 m = obsutil.marker(None, rawmarker)
410 m = obsutil.marker(None, rawmarker)
411 fm.startitem()
411 fm.startitem()
412 fm.plain(indent_string)
412 fm.plain(indent_string)
413 cmdutil.showmarker(fm, m)
413 cmdutil.showmarker(fm, m)
414 fm.end()
414 fm.end()
415
415
416
416
417 def _debugphaseheads(ui, data, indent=0):
417 def _debugphaseheads(ui, data, indent=0):
418 """display version and markers contained in 'data'"""
418 """display version and markers contained in 'data'"""
419 indent_string = b' ' * indent
419 indent_string = b' ' * indent
420 headsbyphase = phases.binarydecode(data)
420 headsbyphase = phases.binarydecode(data)
421 for phase in phases.allphases:
421 for phase in phases.allphases:
422 for head in headsbyphase[phase]:
422 for head in headsbyphase[phase]:
423 ui.write(indent_string)
423 ui.write(indent_string)
424 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
424 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
425
425
426
426
427 def _quasirepr(thing):
427 def _quasirepr(thing):
428 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
428 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
429 return b'{%s}' % (
429 return b'{%s}' % (
430 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
430 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
431 )
431 )
432 return pycompat.bytestr(repr(thing))
432 return pycompat.bytestr(repr(thing))
433
433
434
434
435 def _debugbundle2(ui, gen, all=None, **opts):
435 def _debugbundle2(ui, gen, all=None, **opts):
436 """lists the contents of a bundle2"""
436 """lists the contents of a bundle2"""
437 if not isinstance(gen, bundle2.unbundle20):
437 if not isinstance(gen, bundle2.unbundle20):
438 raise error.Abort(_(b'not a bundle2 file'))
438 raise error.Abort(_(b'not a bundle2 file'))
439 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
439 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
440 parttypes = opts.get('part_type', [])
440 parttypes = opts.get('part_type', [])
441 for part in gen.iterparts():
441 for part in gen.iterparts():
442 if parttypes and part.type not in parttypes:
442 if parttypes and part.type not in parttypes:
443 continue
443 continue
444 msg = b'%s -- %s (mandatory: %r)\n'
444 msg = b'%s -- %s (mandatory: %r)\n'
445 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
445 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
446 if part.type == b'changegroup':
446 if part.type == b'changegroup':
447 version = part.params.get(b'version', b'01')
447 version = part.params.get(b'version', b'01')
448 cg = changegroup.getunbundler(version, part, b'UN')
448 cg = changegroup.getunbundler(version, part, b'UN')
449 if not ui.quiet:
449 if not ui.quiet:
450 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
450 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
451 if part.type == b'obsmarkers':
451 if part.type == b'obsmarkers':
452 if not ui.quiet:
452 if not ui.quiet:
453 _debugobsmarkers(ui, part, indent=4, **opts)
453 _debugobsmarkers(ui, part, indent=4, **opts)
454 if part.type == b'phase-heads':
454 if part.type == b'phase-heads':
455 if not ui.quiet:
455 if not ui.quiet:
456 _debugphaseheads(ui, part, indent=4)
456 _debugphaseheads(ui, part, indent=4)
457
457
458
458
459 @command(
459 @command(
460 b'debugbundle',
460 b'debugbundle',
461 [
461 [
462 (b'a', b'all', None, _(b'show all details')),
462 (b'a', b'all', None, _(b'show all details')),
463 (b'', b'part-type', [], _(b'show only the named part type')),
463 (b'', b'part-type', [], _(b'show only the named part type')),
464 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
464 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
465 ],
465 ],
466 _(b'FILE'),
466 _(b'FILE'),
467 norepo=True,
467 norepo=True,
468 )
468 )
469 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
469 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
470 """lists the contents of a bundle"""
470 """lists the contents of a bundle"""
471 with hg.openpath(ui, bundlepath) as f:
471 with hg.openpath(ui, bundlepath) as f:
472 if spec:
472 if spec:
473 spec = exchange.getbundlespec(ui, f)
473 spec = exchange.getbundlespec(ui, f)
474 ui.write(b'%s\n' % spec)
474 ui.write(b'%s\n' % spec)
475 return
475 return
476
476
477 gen = exchange.readbundle(ui, f, bundlepath)
477 gen = exchange.readbundle(ui, f, bundlepath)
478 if isinstance(gen, bundle2.unbundle20):
478 if isinstance(gen, bundle2.unbundle20):
479 return _debugbundle2(ui, gen, all=all, **opts)
479 return _debugbundle2(ui, gen, all=all, **opts)
480 _debugchangegroup(ui, gen, all=all, **opts)
480 _debugchangegroup(ui, gen, all=all, **opts)
481
481
482
482
483 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
483 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
484 def debugcapabilities(ui, path, **opts):
484 def debugcapabilities(ui, path, **opts):
485 """lists the capabilities of a remote peer"""
485 """lists the capabilities of a remote peer"""
486 opts = pycompat.byteskwargs(opts)
486 opts = pycompat.byteskwargs(opts)
487 peer = hg.peer(ui, opts, path)
487 peer = hg.peer(ui, opts, path)
488 try:
488 try:
489 caps = peer.capabilities()
489 caps = peer.capabilities()
490 ui.writenoi18n(b'Main capabilities:\n')
490 ui.writenoi18n(b'Main capabilities:\n')
491 for c in sorted(caps):
491 for c in sorted(caps):
492 ui.write(b' %s\n' % c)
492 ui.write(b' %s\n' % c)
493 b2caps = bundle2.bundle2caps(peer)
493 b2caps = bundle2.bundle2caps(peer)
494 if b2caps:
494 if b2caps:
495 ui.writenoi18n(b'Bundle2 capabilities:\n')
495 ui.writenoi18n(b'Bundle2 capabilities:\n')
496 for key, values in sorted(b2caps.items()):
496 for key, values in sorted(b2caps.items()):
497 ui.write(b' %s\n' % key)
497 ui.write(b' %s\n' % key)
498 for v in values:
498 for v in values:
499 ui.write(b' %s\n' % v)
499 ui.write(b' %s\n' % v)
500 finally:
500 finally:
501 peer.close()
501 peer.close()
502
502
503
503
504 @command(
504 @command(
505 b'debugchangedfiles',
505 b'debugchangedfiles',
506 [
506 [
507 (
507 (
508 b'',
508 b'',
509 b'compute',
509 b'compute',
510 False,
510 False,
511 b"compute information instead of reading it from storage",
511 b"compute information instead of reading it from storage",
512 ),
512 ),
513 ],
513 ],
514 b'REV',
514 b'REV',
515 )
515 )
516 def debugchangedfiles(ui, repo, rev, **opts):
516 def debugchangedfiles(ui, repo, rev, **opts):
517 """list the stored files changes for a revision"""
517 """list the stored files changes for a revision"""
518 ctx = logcmdutil.revsingle(repo, rev, None)
518 ctx = logcmdutil.revsingle(repo, rev, None)
519 files = None
519 files = None
520
520
521 if opts['compute']:
521 if opts['compute']:
522 files = metadata.compute_all_files_changes(ctx)
522 files = metadata.compute_all_files_changes(ctx)
523 else:
523 else:
524 sd = repo.changelog.sidedata(ctx.rev())
524 sd = repo.changelog.sidedata(ctx.rev())
525 files_block = sd.get(sidedata.SD_FILES)
525 files_block = sd.get(sidedata.SD_FILES)
526 if files_block is not None:
526 if files_block is not None:
527 files = metadata.decode_files_sidedata(sd)
527 files = metadata.decode_files_sidedata(sd)
528 if files is not None:
528 if files is not None:
529 for f in sorted(files.touched):
529 for f in sorted(files.touched):
530 if f in files.added:
530 if f in files.added:
531 action = b"added"
531 action = b"added"
532 elif f in files.removed:
532 elif f in files.removed:
533 action = b"removed"
533 action = b"removed"
534 elif f in files.merged:
534 elif f in files.merged:
535 action = b"merged"
535 action = b"merged"
536 elif f in files.salvaged:
536 elif f in files.salvaged:
537 action = b"salvaged"
537 action = b"salvaged"
538 else:
538 else:
539 action = b"touched"
539 action = b"touched"
540
540
541 copy_parent = b""
541 copy_parent = b""
542 copy_source = b""
542 copy_source = b""
543 if f in files.copied_from_p1:
543 if f in files.copied_from_p1:
544 copy_parent = b"p1"
544 copy_parent = b"p1"
545 copy_source = files.copied_from_p1[f]
545 copy_source = files.copied_from_p1[f]
546 elif f in files.copied_from_p2:
546 elif f in files.copied_from_p2:
547 copy_parent = b"p2"
547 copy_parent = b"p2"
548 copy_source = files.copied_from_p2[f]
548 copy_source = files.copied_from_p2[f]
549
549
550 data = (action, copy_parent, f, copy_source)
550 data = (action, copy_parent, f, copy_source)
551 template = b"%-8s %2s: %s, %s;\n"
551 template = b"%-8s %2s: %s, %s;\n"
552 ui.write(template % data)
552 ui.write(template % data)
553
553
554
554
555 @command(b'debugcheckstate', [], b'')
555 @command(b'debugcheckstate', [], b'')
556 def debugcheckstate(ui, repo):
556 def debugcheckstate(ui, repo):
557 """validate the correctness of the current dirstate"""
557 """validate the correctness of the current dirstate"""
558 parent1, parent2 = repo.dirstate.parents()
558 parent1, parent2 = repo.dirstate.parents()
559 m1 = repo[parent1].manifest()
559 m1 = repo[parent1].manifest()
560 m2 = repo[parent2].manifest()
560 m2 = repo[parent2].manifest()
561 errors = 0
561 errors = 0
562 for err in repo.dirstate.verify(m1, m2):
562 for err in repo.dirstate.verify(m1, m2):
563 ui.warn(err[0] % err[1:])
563 ui.warn(err[0] % err[1:])
564 errors += 1
564 errors += 1
565 if errors:
565 if errors:
566 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
566 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
567 raise error.Abort(errstr)
567 raise error.Abort(errstr)
568
568
569
569
570 @command(
570 @command(
571 b'debugcolor',
571 b'debugcolor',
572 [(b'', b'style', None, _(b'show all configured styles'))],
572 [(b'', b'style', None, _(b'show all configured styles'))],
573 b'hg debugcolor',
573 b'hg debugcolor',
574 )
574 )
575 def debugcolor(ui, repo, **opts):
575 def debugcolor(ui, repo, **opts):
576 """show available color, effects or style"""
576 """show available color, effects or style"""
577 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
577 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
578 if opts.get('style'):
578 if opts.get('style'):
579 return _debugdisplaystyle(ui)
579 return _debugdisplaystyle(ui)
580 else:
580 else:
581 return _debugdisplaycolor(ui)
581 return _debugdisplaycolor(ui)
582
582
583
583
584 def _debugdisplaycolor(ui):
584 def _debugdisplaycolor(ui):
585 ui = ui.copy()
585 ui = ui.copy()
586 ui._styles.clear()
586 ui._styles.clear()
587 for effect in color._activeeffects(ui).keys():
587 for effect in color._activeeffects(ui).keys():
588 ui._styles[effect] = effect
588 ui._styles[effect] = effect
589 if ui._terminfoparams:
589 if ui._terminfoparams:
590 for k, v in ui.configitems(b'color'):
590 for k, v in ui.configitems(b'color'):
591 if k.startswith(b'color.'):
591 if k.startswith(b'color.'):
592 ui._styles[k] = k[6:]
592 ui._styles[k] = k[6:]
593 elif k.startswith(b'terminfo.'):
593 elif k.startswith(b'terminfo.'):
594 ui._styles[k] = k[9:]
594 ui._styles[k] = k[9:]
595 ui.write(_(b'available colors:\n'))
595 ui.write(_(b'available colors:\n'))
596 # sort label with a '_' after the other to group '_background' entry.
596 # sort label with a '_' after the other to group '_background' entry.
597 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
597 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
598 for colorname, label in items:
598 for colorname, label in items:
599 ui.write(b'%s\n' % colorname, label=label)
599 ui.write(b'%s\n' % colorname, label=label)
600
600
601
601
602 def _debugdisplaystyle(ui):
602 def _debugdisplaystyle(ui):
603 ui.write(_(b'available style:\n'))
603 ui.write(_(b'available style:\n'))
604 if not ui._styles:
604 if not ui._styles:
605 return
605 return
606 width = max(len(s) for s in ui._styles)
606 width = max(len(s) for s in ui._styles)
607 for label, effects in sorted(ui._styles.items()):
607 for label, effects in sorted(ui._styles.items()):
608 ui.write(b'%s' % label, label=label)
608 ui.write(b'%s' % label, label=label)
609 if effects:
609 if effects:
610 # 50
610 # 50
611 ui.write(b': ')
611 ui.write(b': ')
612 ui.write(b' ' * (max(0, width - len(label))))
612 ui.write(b' ' * (max(0, width - len(label))))
613 ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
613 ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
614 ui.write(b'\n')
614 ui.write(b'\n')
615
615
616
616
617 @command(b'debugcreatestreamclonebundle', [], b'FILE')
617 @command(b'debugcreatestreamclonebundle', [], b'FILE')
618 def debugcreatestreamclonebundle(ui, repo, fname):
618 def debugcreatestreamclonebundle(ui, repo, fname):
619 """create a stream clone bundle file
619 """create a stream clone bundle file
620
620
621 Stream bundles are special bundles that are essentially archives of
621 Stream bundles are special bundles that are essentially archives of
622 revlog files. They are commonly used for cloning very quickly.
622 revlog files. They are commonly used for cloning very quickly.
623 """
623 """
624 # TODO we may want to turn this into an abort when this functionality
624 # TODO we may want to turn this into an abort when this functionality
625 # is moved into `hg bundle`.
625 # is moved into `hg bundle`.
626 if phases.hassecret(repo):
626 if phases.hassecret(repo):
627 ui.warn(
627 ui.warn(
628 _(
628 _(
629 b'(warning: stream clone bundle will contain secret '
629 b'(warning: stream clone bundle will contain secret '
630 b'revisions)\n'
630 b'revisions)\n'
631 )
631 )
632 )
632 )
633
633
634 requirements, gen = streamclone.generatebundlev1(repo)
634 requirements, gen = streamclone.generatebundlev1(repo)
635 changegroup.writechunks(ui, gen, fname)
635 changegroup.writechunks(ui, gen, fname)
636
636
637 ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
637 ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
638
638
639
639
640 @command(
640 @command(
641 b'debugdag',
641 b'debugdag',
642 [
642 [
643 (b't', b'tags', None, _(b'use tags as labels')),
643 (b't', b'tags', None, _(b'use tags as labels')),
644 (b'b', b'branches', None, _(b'annotate with branch names')),
644 (b'b', b'branches', None, _(b'annotate with branch names')),
645 (b'', b'dots', None, _(b'use dots for runs')),
645 (b'', b'dots', None, _(b'use dots for runs')),
646 (b's', b'spaces', None, _(b'separate elements by spaces')),
646 (b's', b'spaces', None, _(b'separate elements by spaces')),
647 ],
647 ],
648 _(b'[OPTION]... [FILE [REV]...]'),
648 _(b'[OPTION]... [FILE [REV]...]'),
649 optionalrepo=True,
649 optionalrepo=True,
650 )
650 )
651 def debugdag(ui, repo, file_=None, *revs, **opts):
651 def debugdag(ui, repo, file_=None, *revs, **opts):
652 """format the changelog or an index DAG as a concise textual description
652 """format the changelog or an index DAG as a concise textual description
653
653
654 If you pass a revlog index, the revlog's DAG is emitted. If you list
654 If you pass a revlog index, the revlog's DAG is emitted. If you list
655 revision numbers, they get labeled in the output as rN.
655 revision numbers, they get labeled in the output as rN.
656
656
657 Otherwise, the changelog DAG of the current repo is emitted.
657 Otherwise, the changelog DAG of the current repo is emitted.
658 """
658 """
659 spaces = opts.get('spaces')
659 spaces = opts.get('spaces')
660 dots = opts.get('dots')
660 dots = opts.get('dots')
661 if file_:
661 if file_:
662 rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
662 rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
663 revs = {int(r) for r in revs}
663 revs = {int(r) for r in revs}
664
664
665 def events():
665 def events():
666 for r in rlog:
666 for r in rlog:
667 yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
667 yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
668 if r in revs:
668 if r in revs:
669 yield b'l', (r, b"r%i" % r)
669 yield b'l', (r, b"r%i" % r)
670
670
671 elif repo:
671 elif repo:
672 cl = repo.changelog
672 cl = repo.changelog
673 tags = opts.get('tags')
673 tags = opts.get('tags')
674 branches = opts.get('branches')
674 branches = opts.get('branches')
675 if tags:
675 if tags:
676 labels = {}
676 labels = {}
677 for l, n in repo.tags().items():
677 for l, n in repo.tags().items():
678 labels.setdefault(cl.rev(n), []).append(l)
678 labels.setdefault(cl.rev(n), []).append(l)
679
679
680 def events():
680 def events():
681 b = b"default"
681 b = b"default"
682 for r in cl:
682 for r in cl:
683 if branches:
683 if branches:
684 newb = cl.read(cl.node(r))[5][b'branch']
684 newb = cl.read(cl.node(r))[5][b'branch']
685 if newb != b:
685 if newb != b:
686 yield b'a', newb
686 yield b'a', newb
687 b = newb
687 b = newb
688 yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
688 yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
689 if tags:
689 if tags:
690 ls = labels.get(r)
690 ls = labels.get(r)
691 if ls:
691 if ls:
692 for l in ls:
692 for l in ls:
693 yield b'l', (r, l)
693 yield b'l', (r, l)
694
694
695 else:
695 else:
696 raise error.Abort(_(b'need repo for changelog dag'))
696 raise error.Abort(_(b'need repo for changelog dag'))
697
697
698 for line in dagparser.dagtextlines(
698 for line in dagparser.dagtextlines(
699 events(),
699 events(),
700 addspaces=spaces,
700 addspaces=spaces,
701 wraplabels=True,
701 wraplabels=True,
702 wrapannotations=True,
702 wrapannotations=True,
703 wrapnonlinear=dots,
703 wrapnonlinear=dots,
704 usedots=dots,
704 usedots=dots,
705 maxlinewidth=70,
705 maxlinewidth=70,
706 ):
706 ):
707 ui.write(line)
707 ui.write(line)
708 ui.write(b"\n")
708 ui.write(b"\n")
709
709
710
710
711 @command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
711 @command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
712 def debugdata(ui, repo, file_, rev=None, **opts):
712 def debugdata(ui, repo, file_, rev=None, **opts):
713 """dump the contents of a data file revision"""
713 """dump the contents of a data file revision"""
714 opts = pycompat.byteskwargs(opts)
714 opts = pycompat.byteskwargs(opts)
715 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
715 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
716 if rev is not None:
716 if rev is not None:
717 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
717 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
718 file_, rev = None, file_
718 file_, rev = None, file_
719 elif rev is None:
719 elif rev is None:
720 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
720 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
721 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
721 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
722 try:
722 try:
723 ui.write(r.rawdata(r.lookup(rev)))
723 ui.write(r.rawdata(r.lookup(rev)))
724 except KeyError:
724 except KeyError:
725 raise error.Abort(_(b'invalid revision identifier %s') % rev)
725 raise error.Abort(_(b'invalid revision identifier %s') % rev)
726
726
727
727
728 @command(
728 @command(
729 b'debugdate',
729 b'debugdate',
730 [(b'e', b'extended', None, _(b'try extended date formats'))],
730 [(b'e', b'extended', None, _(b'try extended date formats'))],
731 _(b'[-e] DATE [RANGE]'),
731 _(b'[-e] DATE [RANGE]'),
732 norepo=True,
732 norepo=True,
733 optionalrepo=True,
733 optionalrepo=True,
734 )
734 )
735 def debugdate(ui, date, range=None, **opts):
735 def debugdate(ui, date, range=None, **opts):
736 """parse and display a date"""
736 """parse and display a date"""
737 if opts["extended"]:
737 if opts["extended"]:
738 d = dateutil.parsedate(date, dateutil.extendeddateformats)
738 d = dateutil.parsedate(date, dateutil.extendeddateformats)
739 else:
739 else:
740 d = dateutil.parsedate(date)
740 d = dateutil.parsedate(date)
741 ui.writenoi18n(b"internal: %d %d\n" % d)
741 ui.writenoi18n(b"internal: %d %d\n" % d)
742 ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
742 ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
743 if range:
743 if range:
744 m = dateutil.matchdate(range)
744 m = dateutil.matchdate(range)
745 ui.writenoi18n(b"match: %s\n" % m(d[0]))
745 ui.writenoi18n(b"match: %s\n" % m(d[0]))
746
746
747
747
748 @command(
748 @command(
749 b'debugdeltachain',
749 b'debugdeltachain',
750 cmdutil.debugrevlogopts + cmdutil.formatteropts,
750 cmdutil.debugrevlogopts + cmdutil.formatteropts,
751 _(b'-c|-m|FILE'),
751 _(b'-c|-m|FILE'),
752 optionalrepo=True,
752 optionalrepo=True,
753 )
753 )
754 def debugdeltachain(ui, repo, file_=None, **opts):
754 def debugdeltachain(ui, repo, file_=None, **opts):
755 """dump information about delta chains in a revlog
755 """dump information about delta chains in a revlog
756
756
757 Output can be templatized. Available template keywords are:
757 Output can be templatized. Available template keywords are:
758
758
759 :``rev``: revision number
759 :``rev``: revision number
760 :``chainid``: delta chain identifier (numbered by unique base)
760 :``chainid``: delta chain identifier (numbered by unique base)
761 :``chainlen``: delta chain length to this revision
761 :``chainlen``: delta chain length to this revision
762 :``prevrev``: previous revision in delta chain
762 :``prevrev``: previous revision in delta chain
763 :``deltatype``: role of delta / how it was computed
763 :``deltatype``: role of delta / how it was computed
764 :``compsize``: compressed size of revision
764 :``compsize``: compressed size of revision
765 :``uncompsize``: uncompressed size of revision
765 :``uncompsize``: uncompressed size of revision
766 :``chainsize``: total size of compressed revisions in chain
766 :``chainsize``: total size of compressed revisions in chain
767 :``chainratio``: total chain size divided by uncompressed revision size
767 :``chainratio``: total chain size divided by uncompressed revision size
768 (new delta chains typically start at ratio 2.00)
768 (new delta chains typically start at ratio 2.00)
769 :``lindist``: linear distance from base revision in delta chain to end
769 :``lindist``: linear distance from base revision in delta chain to end
770 of this revision
770 of this revision
771 :``extradist``: total size of revisions not part of this delta chain from
771 :``extradist``: total size of revisions not part of this delta chain from
772 base of delta chain to end of this revision; a measurement
772 base of delta chain to end of this revision; a measurement
773 of how much extra data we need to read/seek across to read
773 of how much extra data we need to read/seek across to read
774 the delta chain for this revision
774 the delta chain for this revision
775 :``extraratio``: extradist divided by chainsize; another representation of
775 :``extraratio``: extradist divided by chainsize; another representation of
776 how much unrelated data is needed to load this delta chain
776 how much unrelated data is needed to load this delta chain
777
777
778 If the repository is configured to use the sparse read, additional keywords
778 If the repository is configured to use the sparse read, additional keywords
779 are available:
779 are available:
780
780
781 :``readsize``: total size of data read from the disk for a revision
781 :``readsize``: total size of data read from the disk for a revision
782 (sum of the sizes of all the blocks)
782 (sum of the sizes of all the blocks)
783 :``largestblock``: size of the largest block of data read from the disk
783 :``largestblock``: size of the largest block of data read from the disk
784 :``readdensity``: density of useful bytes in the data read from the disk
784 :``readdensity``: density of useful bytes in the data read from the disk
785 :``srchunks``: in how many data hunks the whole revision would be read
785 :``srchunks``: in how many data hunks the whole revision would be read
786
786
787 The sparse read can be enabled with experimental.sparse-read = True
787 The sparse read can be enabled with experimental.sparse-read = True
788 """
788 """
789 opts = pycompat.byteskwargs(opts)
789 opts = pycompat.byteskwargs(opts)
790 r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
790 r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
791 index = r.index
791 index = r.index
792 start = r.start
792 start = r.start
793 length = r.length
793 length = r.length
794 generaldelta = r._generaldelta
794 generaldelta = r._generaldelta
795 withsparseread = getattr(r, '_withsparseread', False)
795 withsparseread = getattr(r, '_withsparseread', False)
796
796
797 def revinfo(rev):
797 def revinfo(rev):
798 e = index[rev]
798 e = index[rev]
799 compsize = e[1]
799 compsize = e[1]
800 uncompsize = e[2]
800 uncompsize = e[2]
801 chainsize = 0
801 chainsize = 0
802
802
803 if generaldelta:
803 if generaldelta:
804 if e[3] == e[5]:
804 if e[3] == e[5]:
805 deltatype = b'p1'
805 deltatype = b'p1'
806 elif e[3] == e[6]:
806 elif e[3] == e[6]:
807 deltatype = b'p2'
807 deltatype = b'p2'
808 + elif e[3] == rev:
809 + deltatype = b'base'
810 + elif r.issnapshot(rev):
811 + deltatype = b'snap'
808 elif e[3] == rev - 1:
812 elif e[3] == rev - 1:
809 deltatype = b'prev'
813 deltatype = b'prev'
810 - elif e[3] == rev:
811 - deltatype = b'base'
812 else:
814 else:
813 deltatype = b'other'
815 deltatype = b'other'
814 else:
816 else:
815 if e[3] == rev:
817 if e[3] == rev:
816 deltatype = b'base'
818 deltatype = b'base'
817 else:
819 else:
818 deltatype = b'prev'
820 deltatype = b'prev'
819
821
820 chain = r._deltachain(rev)[0]
822 chain = r._deltachain(rev)[0]
821 for iterrev in chain:
823 for iterrev in chain:
822 e = index[iterrev]
824 e = index[iterrev]
823 chainsize += e[1]
825 chainsize += e[1]
824
826
825 return compsize, uncompsize, deltatype, chain, chainsize
827 return compsize, uncompsize, deltatype, chain, chainsize
826
828
827 fm = ui.formatter(b'debugdeltachain', opts)
829 fm = ui.formatter(b'debugdeltachain', opts)
828
830
829 fm.plain(
831 fm.plain(
830 b' rev chain# chainlen prev delta '
832 b' rev chain# chainlen prev delta '
831 b'size rawsize chainsize ratio lindist extradist '
833 b'size rawsize chainsize ratio lindist extradist '
832 b'extraratio'
834 b'extraratio'
833 )
835 )
834 if withsparseread:
836 if withsparseread:
835 fm.plain(b' readsize largestblk rddensity srchunks')
837 fm.plain(b' readsize largestblk rddensity srchunks')
836 fm.plain(b'\n')
838 fm.plain(b'\n')
837
839
838 chainbases = {}
840 chainbases = {}
839 for rev in r:
841 for rev in r:
840 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
842 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
841 chainbase = chain[0]
843 chainbase = chain[0]
842 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
844 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
843 basestart = start(chainbase)
845 basestart = start(chainbase)
844 revstart = start(rev)
846 revstart = start(rev)
845 lineardist = revstart + comp - basestart
847 lineardist = revstart + comp - basestart
846 extradist = lineardist - chainsize
848 extradist = lineardist - chainsize
847 try:
849 try:
848 prevrev = chain[-2]
850 prevrev = chain[-2]
849 except IndexError:
851 except IndexError:
850 prevrev = -1
852 prevrev = -1
851
853
852 if uncomp != 0:
854 if uncomp != 0:
853 chainratio = float(chainsize) / float(uncomp)
855 chainratio = float(chainsize) / float(uncomp)
854 else:
856 else:
855 chainratio = chainsize
857 chainratio = chainsize
856
858
857 if chainsize != 0:
859 if chainsize != 0:
858 extraratio = float(extradist) / float(chainsize)
860 extraratio = float(extradist) / float(chainsize)
859 else:
861 else:
860 extraratio = extradist
862 extraratio = extradist
861
863
862 fm.startitem()
864 fm.startitem()
863 fm.write(
865 fm.write(
864 b'rev chainid chainlen prevrev deltatype compsize '
866 b'rev chainid chainlen prevrev deltatype compsize '
865 b'uncompsize chainsize chainratio lindist extradist '
867 b'uncompsize chainsize chainratio lindist extradist '
866 b'extraratio',
868 b'extraratio',
867 b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
869 b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
868 rev,
870 rev,
869 chainid,
871 chainid,
870 len(chain),
872 len(chain),
871 prevrev,
873 prevrev,
872 deltatype,
874 deltatype,
873 comp,
875 comp,
874 uncomp,
876 uncomp,
875 chainsize,
877 chainsize,
876 chainratio,
878 chainratio,
877 lineardist,
879 lineardist,
878 extradist,
880 extradist,
879 extraratio,
881 extraratio,
880 rev=rev,
882 rev=rev,
881 chainid=chainid,
883 chainid=chainid,
882 chainlen=len(chain),
884 chainlen=len(chain),
883 prevrev=prevrev,
885 prevrev=prevrev,
884 deltatype=deltatype,
886 deltatype=deltatype,
885 compsize=comp,
887 compsize=comp,
886 uncompsize=uncomp,
888 uncompsize=uncomp,
887 chainsize=chainsize,
889 chainsize=chainsize,
888 chainratio=chainratio,
890 chainratio=chainratio,
889 lindist=lineardist,
891 lindist=lineardist,
890 extradist=extradist,
892 extradist=extradist,
891 extraratio=extraratio,
893 extraratio=extraratio,
892 )
894 )
893 if withsparseread:
895 if withsparseread:
894 readsize = 0
896 readsize = 0
895 largestblock = 0
897 largestblock = 0
896 srchunks = 0
898 srchunks = 0
897
899
898 for revschunk in deltautil.slicechunk(r, chain):
900 for revschunk in deltautil.slicechunk(r, chain):
899 srchunks += 1
901 srchunks += 1
900 blkend = start(revschunk[-1]) + length(revschunk[-1])
902 blkend = start(revschunk[-1]) + length(revschunk[-1])
901 blksize = blkend - start(revschunk[0])
903 blksize = blkend - start(revschunk[0])
902
904
903 readsize += blksize
905 readsize += blksize
904 if largestblock < blksize:
906 if largestblock < blksize:
905 largestblock = blksize
907 largestblock = blksize
906
908
907 if readsize:
909 if readsize:
908 readdensity = float(chainsize) / float(readsize)
910 readdensity = float(chainsize) / float(readsize)
909 else:
911 else:
910 readdensity = 1
912 readdensity = 1
911
913
912 fm.write(
914 fm.write(
913 b'readsize largestblock readdensity srchunks',
915 b'readsize largestblock readdensity srchunks',
914 b' %10d %10d %9.5f %8d',
916 b' %10d %10d %9.5f %8d',
915 readsize,
917 readsize,
916 largestblock,
918 largestblock,
917 readdensity,
919 readdensity,
918 srchunks,
920 srchunks,
919 readsize=readsize,
921 readsize=readsize,
920 largestblock=largestblock,
922 largestblock=largestblock,
921 readdensity=readdensity,
923 readdensity=readdensity,
922 srchunks=srchunks,
924 srchunks=srchunks,
923 )
925 )
924
926
925 fm.plain(b'\n')
927 fm.plain(b'\n')
926
928
927 fm.end()
929 fm.end()
928
930
929
931
930 @command(
932 @command(
931 b'debugdirstate|debugstate',
933 b'debugdirstate|debugstate',
932 [
934 [
933 (
935 (
934 b'',
936 b'',
935 b'nodates',
937 b'nodates',
936 None,
938 None,
937 _(b'do not display the saved mtime (DEPRECATED)'),
939 _(b'do not display the saved mtime (DEPRECATED)'),
938 ),
940 ),
939 (b'', b'dates', True, _(b'display the saved mtime')),
941 (b'', b'dates', True, _(b'display the saved mtime')),
940 (b'', b'datesort', None, _(b'sort by saved mtime')),
942 (b'', b'datesort', None, _(b'sort by saved mtime')),
941 (
943 (
942 b'',
944 b'',
943 b'docket',
945 b'docket',
944 False,
946 False,
945 _(b'display the docket (metadata file) instead'),
947 _(b'display the docket (metadata file) instead'),
946 ),
948 ),
947 (
949 (
948 b'',
950 b'',
949 b'all',
951 b'all',
950 False,
952 False,
951 _(b'display dirstate-v2 tree nodes that would not exist in v1'),
953 _(b'display dirstate-v2 tree nodes that would not exist in v1'),
952 ),
954 ),
953 ],
955 ],
954 _(b'[OPTION]...'),
956 _(b'[OPTION]...'),
955 )
957 )
956 def debugstate(ui, repo, **opts):
958 def debugstate(ui, repo, **opts):
957 """show the contents of the current dirstate"""
959 """show the contents of the current dirstate"""
958
960
959 if opts.get("docket"):
961 if opts.get("docket"):
960 if not repo.dirstate._use_dirstate_v2:
962 if not repo.dirstate._use_dirstate_v2:
961 raise error.Abort(_(b'dirstate v1 does not have a docket'))
963 raise error.Abort(_(b'dirstate v1 does not have a docket'))
962
964
963 docket = repo.dirstate._map.docket
965 docket = repo.dirstate._map.docket
964 (
966 (
965 start_offset,
967 start_offset,
966 root_nodes,
968 root_nodes,
967 nodes_with_entry,
969 nodes_with_entry,
968 nodes_with_copy,
970 nodes_with_copy,
969 unused_bytes,
971 unused_bytes,
970 _unused,
972 _unused,
971 ignore_pattern,
973 ignore_pattern,
972 ) = dirstateutils.v2.TREE_METADATA.unpack(docket.tree_metadata)
974 ) = dirstateutils.v2.TREE_METADATA.unpack(docket.tree_metadata)
973
975
974 ui.write(_(b"size of dirstate data: %d\n") % docket.data_size)
976 ui.write(_(b"size of dirstate data: %d\n") % docket.data_size)
975 ui.write(_(b"data file uuid: %s\n") % docket.uuid)
977 ui.write(_(b"data file uuid: %s\n") % docket.uuid)
976 ui.write(_(b"start offset of root nodes: %d\n") % start_offset)
978 ui.write(_(b"start offset of root nodes: %d\n") % start_offset)
977 ui.write(_(b"number of root nodes: %d\n") % root_nodes)
979 ui.write(_(b"number of root nodes: %d\n") % root_nodes)
978 ui.write(_(b"nodes with entries: %d\n") % nodes_with_entry)
980 ui.write(_(b"nodes with entries: %d\n") % nodes_with_entry)
979 ui.write(_(b"nodes with copies: %d\n") % nodes_with_copy)
981 ui.write(_(b"nodes with copies: %d\n") % nodes_with_copy)
980 ui.write(_(b"number of unused bytes: %d\n") % unused_bytes)
982 ui.write(_(b"number of unused bytes: %d\n") % unused_bytes)
981 ui.write(
983 ui.write(
982 _(b"ignore pattern hash: %s\n") % binascii.hexlify(ignore_pattern)
984 _(b"ignore pattern hash: %s\n") % binascii.hexlify(ignore_pattern)
983 )
985 )
984 return
986 return
985
987
986 nodates = not opts['dates']
988 nodates = not opts['dates']
987 if opts.get('nodates') is not None:
989 if opts.get('nodates') is not None:
988 nodates = True
990 nodates = True
989 datesort = opts.get('datesort')
991 datesort = opts.get('datesort')
990
992
991 if datesort:
993 if datesort:
992
994
993 def keyfunc(entry):
995 def keyfunc(entry):
994 filename, _state, _mode, _size, mtime = entry
996 filename, _state, _mode, _size, mtime = entry
995 return (mtime, filename)
997 return (mtime, filename)
996
998
997 else:
999 else:
998 keyfunc = None # sort by filename
1000 keyfunc = None # sort by filename
999 entries = list(repo.dirstate._map.debug_iter(all=opts['all']))
1001 entries = list(repo.dirstate._map.debug_iter(all=opts['all']))
1000 entries.sort(key=keyfunc)
1002 entries.sort(key=keyfunc)
1001 for entry in entries:
1003 for entry in entries:
1002 filename, state, mode, size, mtime = entry
1004 filename, state, mode, size, mtime = entry
1003 if mtime == -1:
1005 if mtime == -1:
1004 timestr = b'unset '
1006 timestr = b'unset '
1005 elif nodates:
1007 elif nodates:
1006 timestr = b'set '
1008 timestr = b'set '
1007 else:
1009 else:
1008 timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime))
1010 timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime))
1009 timestr = encoding.strtolocal(timestr)
1011 timestr = encoding.strtolocal(timestr)
1010 if mode & 0o20000:
1012 if mode & 0o20000:
1011 mode = b'lnk'
1013 mode = b'lnk'
1012 else:
1014 else:
1013 mode = b'%3o' % (mode & 0o777 & ~util.umask)
1015 mode = b'%3o' % (mode & 0o777 & ~util.umask)
1014 ui.write(b"%c %s %10d %s%s\n" % (state, mode, size, timestr, filename))
1016 ui.write(b"%c %s %10d %s%s\n" % (state, mode, size, timestr, filename))
1015 for f in repo.dirstate.copies():
1017 for f in repo.dirstate.copies():
1016 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
1018 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
1017
1019
1018
1020
1019 @command(
1021 @command(
1020 b'debugdirstateignorepatternshash',
1022 b'debugdirstateignorepatternshash',
1021 [],
1023 [],
1022 _(b''),
1024 _(b''),
1023 )
1025 )
1024 def debugdirstateignorepatternshash(ui, repo, **opts):
1026 def debugdirstateignorepatternshash(ui, repo, **opts):
1025 """show the hash of ignore patterns stored in dirstate if v2,
1027 """show the hash of ignore patterns stored in dirstate if v2,
1026 or nothing for dirstate-v1
1028 or nothing for dirstate-v1
1027 """
1029 """
1028 if repo.dirstate._use_dirstate_v2:
1030 if repo.dirstate._use_dirstate_v2:
1029 docket = repo.dirstate._map.docket
1031 docket = repo.dirstate._map.docket
1030 hash_len = 20 # 160 bits for SHA-1
1032 hash_len = 20 # 160 bits for SHA-1
1031 hash_bytes = docket.tree_metadata[-hash_len:]
1033 hash_bytes = docket.tree_metadata[-hash_len:]
1032 ui.write(binascii.hexlify(hash_bytes) + b'\n')
1034 ui.write(binascii.hexlify(hash_bytes) + b'\n')
1033
1035
1034
1036
1035 @command(
1037 @command(
1036 b'debugdiscovery',
1038 b'debugdiscovery',
1037 [
1039 [
1038 (b'', b'old', None, _(b'use old-style discovery')),
1040 (b'', b'old', None, _(b'use old-style discovery')),
1039 (
1041 (
1040 b'',
1042 b'',
1041 b'nonheads',
1043 b'nonheads',
1042 None,
1044 None,
1043 _(b'use old-style discovery with non-heads included'),
1045 _(b'use old-style discovery with non-heads included'),
1044 ),
1046 ),
1045 (b'', b'rev', [], b'restrict discovery to this set of revs'),
1047 (b'', b'rev', [], b'restrict discovery to this set of revs'),
1046 (b'', b'seed', b'12323', b'specify the random seed used for discovery'),
1048 (b'', b'seed', b'12323', b'specify the random seed used for discovery'),
1047 (
1049 (
1048 b'',
1050 b'',
1049 b'local-as-revs',
1051 b'local-as-revs',
1050 b"",
1052 b"",
1051 b'treat local as having these revisions only',
1053 b'treat local as having these revisions only',
1052 ),
1054 ),
1053 (
1055 (
1054 b'',
1056 b'',
1055 b'remote-as-revs',
1057 b'remote-as-revs',
1056 b"",
1058 b"",
1057 b'use local as remote, with only these revisions',
1059 b'use local as remote, with only these revisions',
1058 ),
1060 ),
1059 ]
1061 ]
1060 + cmdutil.remoteopts
1062 + cmdutil.remoteopts
1061 + cmdutil.formatteropts,
1063 + cmdutil.formatteropts,
1062 _(b'[--rev REV] [OTHER]'),
1064 _(b'[--rev REV] [OTHER]'),
1063 )
1065 )
1064 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
1066 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
1065 """runs the changeset discovery protocol in isolation
1067 """runs the changeset discovery protocol in isolation
1066
1068
1067 The local peer can be "replaced" by a subset of the local repository by
1069 The local peer can be "replaced" by a subset of the local repository by
1068 using the `--local-as-revs` flag. In the same way, the usual `remote` peer
1070 using the `--local-as-revs` flag. In the same way, the usual `remote` peer
1069 can be "replaced" by a subset of the local repository using the
1071 can be "replaced" by a subset of the local repository using the
1070 `--remote-as-revs` flag. This is useful to efficiently debug pathological
1072 `--remote-as-revs` flag. This is useful to efficiently debug pathological
1071 discovery situations.
1073 discovery situations.
1072
1074
1073 The following developer-oriented config options are relevant for people playing with this command:
1075 The following developer-oriented config options are relevant for people playing with this command:
1074
1076
1075 * devel.discovery.exchange-heads=True
1077 * devel.discovery.exchange-heads=True
1076
1078
1077 If False, the discovery will not start with
1079 If False, the discovery will not start with
1078 remote head fetching and local head querying.
1080 remote head fetching and local head querying.
1079
1081
1080 * devel.discovery.grow-sample=True
1082 * devel.discovery.grow-sample=True
1081
1083
1082 If False, the sample size used in set discovery will not be increased
1084 If False, the sample size used in set discovery will not be increased
1083 through the process
1085 through the process
1084
1086
1085 * devel.discovery.grow-sample.dynamic=True
1087 * devel.discovery.grow-sample.dynamic=True
1086
1088
1087 When discovery.grow-sample.dynamic is True (the default), the sample size is
1089 When discovery.grow-sample.dynamic is True (the default), the sample size is
1088 adapted to the shape of the undecided set (it is set to the max of:
1090 adapted to the shape of the undecided set (it is set to the max of:
1089 <target-size>, len(roots(undecided)), len(heads(undecided))).
1091 <target-size>, len(roots(undecided)), len(heads(undecided))).
1090
1092
1091 * devel.discovery.grow-sample.rate=1.05
1093 * devel.discovery.grow-sample.rate=1.05
1092
1094
1093 The rate at which the sample grows.
1095 The rate at which the sample grows.
1094
1096
1095 * devel.discovery.randomize=True
1097 * devel.discovery.randomize=True
1096
1098
1097 If False, the random sampling used during discovery is deterministic. It is
1099 If False, the random sampling used during discovery is deterministic. It is
1098 meant for integration tests.
1100 meant for integration tests.
1099
1101
1100 * devel.discovery.sample-size=200
1102 * devel.discovery.sample-size=200
1101
1103
1102 Control the initial size of the discovery sample
1104 Control the initial size of the discovery sample
1103
1105
1104 * devel.discovery.sample-size.initial=100
1106 * devel.discovery.sample-size.initial=100
1105
1107
1106 Control the size of the discovery sample used for the initial round
1108 Control the size of the discovery sample used for the initial round
1107 """
1109 """
1108 opts = pycompat.byteskwargs(opts)
1110 opts = pycompat.byteskwargs(opts)
1109 unfi = repo.unfiltered()
1111 unfi = repo.unfiltered()
1110
1112
1111 # setup potential extra filtering
1113 # setup potential extra filtering
1112 local_revs = opts[b"local_as_revs"]
1114 local_revs = opts[b"local_as_revs"]
1113 remote_revs = opts[b"remote_as_revs"]
1115 remote_revs = opts[b"remote_as_revs"]
1114
1116
1115 # make sure tests are repeatable
1117 # make sure tests are repeatable
1116 random.seed(int(opts[b'seed']))
1118 random.seed(int(opts[b'seed']))
1117
1119
1118 if not remote_revs:
1120 if not remote_revs:
1119
1121
1120 remoteurl, branches = urlutil.get_unique_pull_path(
1122 remoteurl, branches = urlutil.get_unique_pull_path(
1121 b'debugdiscovery', repo, ui, remoteurl
1123 b'debugdiscovery', repo, ui, remoteurl
1122 )
1124 )
1123 remote = hg.peer(repo, opts, remoteurl)
1125 remote = hg.peer(repo, opts, remoteurl)
1124 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
1126 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
1125 else:
1127 else:
1126 branches = (None, [])
1128 branches = (None, [])
1127 remote_filtered_revs = logcmdutil.revrange(
1129 remote_filtered_revs = logcmdutil.revrange(
1128 unfi, [b"not (::(%s))" % remote_revs]
1130 unfi, [b"not (::(%s))" % remote_revs]
1129 )
1131 )
1130 remote_filtered_revs = frozenset(remote_filtered_revs)
1132 remote_filtered_revs = frozenset(remote_filtered_revs)
1131
1133
1132 def remote_func(x):
1134 def remote_func(x):
1133 return remote_filtered_revs
1135 return remote_filtered_revs
1134
1136
1135 repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func
1137 repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func
1136
1138
1137 remote = repo.peer()
1139 remote = repo.peer()
1138 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1140 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1139
1141
1140 if local_revs:
1142 if local_revs:
1141 local_filtered_revs = logcmdutil.revrange(
1143 local_filtered_revs = logcmdutil.revrange(
1142 unfi, [b"not (::(%s))" % local_revs]
1144 unfi, [b"not (::(%s))" % local_revs]
1143 )
1145 )
1144 local_filtered_revs = frozenset(local_filtered_revs)
1146 local_filtered_revs = frozenset(local_filtered_revs)
1145
1147
1146 def local_func(x):
1148 def local_func(x):
1147 return local_filtered_revs
1149 return local_filtered_revs
1148
1150
1149 repoview.filtertable[b'debug-discovery-local-filter'] = local_func
1151 repoview.filtertable[b'debug-discovery-local-filter'] = local_func
1150 repo = repo.filtered(b'debug-discovery-local-filter')
1152 repo = repo.filtered(b'debug-discovery-local-filter')
1151
1153
1152 data = {}
1154 data = {}
1153 if opts.get(b'old'):
1155 if opts.get(b'old'):
1154
1156
1155 def doit(pushedrevs, remoteheads, remote=remote):
1157 def doit(pushedrevs, remoteheads, remote=remote):
1156 if not util.safehasattr(remote, b'branches'):
1158 if not util.safehasattr(remote, b'branches'):
1157 # enable in-client legacy support
1159 # enable in-client legacy support
1158 remote = localrepo.locallegacypeer(remote.local())
1160 remote = localrepo.locallegacypeer(remote.local())
1159 common, _in, hds = treediscovery.findcommonincoming(
1161 common, _in, hds = treediscovery.findcommonincoming(
1160 repo, remote, force=True, audit=data
1162 repo, remote, force=True, audit=data
1161 )
1163 )
1162 common = set(common)
1164 common = set(common)
1163 if not opts.get(b'nonheads'):
1165 if not opts.get(b'nonheads'):
1164 ui.writenoi18n(
1166 ui.writenoi18n(
1165 b"unpruned common: %s\n"
1167 b"unpruned common: %s\n"
1166 % b" ".join(sorted(short(n) for n in common))
1168 % b" ".join(sorted(short(n) for n in common))
1167 )
1169 )
1168
1170
1169 clnode = repo.changelog.node
1171 clnode = repo.changelog.node
1170 common = repo.revs(b'heads(::%ln)', common)
1172 common = repo.revs(b'heads(::%ln)', common)
1171 common = {clnode(r) for r in common}
1173 common = {clnode(r) for r in common}
1172 return common, hds
1174 return common, hds
1173
1175
1174 else:
1176 else:
1175
1177
1176 def doit(pushedrevs, remoteheads, remote=remote):
1178 def doit(pushedrevs, remoteheads, remote=remote):
1177 nodes = None
1179 nodes = None
1178 if pushedrevs:
1180 if pushedrevs:
1179 revs = logcmdutil.revrange(repo, pushedrevs)
1181 revs = logcmdutil.revrange(repo, pushedrevs)
1180 nodes = [repo[r].node() for r in revs]
1182 nodes = [repo[r].node() for r in revs]
1181 common, any, hds = setdiscovery.findcommonheads(
1183 common, any, hds = setdiscovery.findcommonheads(
1182 ui, repo, remote, ancestorsof=nodes, audit=data
1184 ui, repo, remote, ancestorsof=nodes, audit=data
1183 )
1185 )
1184 return common, hds
1186 return common, hds
1185
1187
1186 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
1188 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
1187 localrevs = opts[b'rev']
1189 localrevs = opts[b'rev']
1188
1190
1189 fm = ui.formatter(b'debugdiscovery', opts)
1191 fm = ui.formatter(b'debugdiscovery', opts)
1190 if fm.strict_format:
1192 if fm.strict_format:
1191
1193
1192 @contextlib.contextmanager
1194 @contextlib.contextmanager
1193 def may_capture_output():
1195 def may_capture_output():
1194 ui.pushbuffer()
1196 ui.pushbuffer()
1195 yield
1197 yield
1196 data[b'output'] = ui.popbuffer()
1198 data[b'output'] = ui.popbuffer()
1197
1199
1198 else:
1200 else:
1199 may_capture_output = util.nullcontextmanager
1201 may_capture_output = util.nullcontextmanager
1200 with may_capture_output():
1202 with may_capture_output():
1201 with util.timedcm('debug-discovery') as t:
1203 with util.timedcm('debug-discovery') as t:
1202 common, hds = doit(localrevs, remoterevs)
1204 common, hds = doit(localrevs, remoterevs)
1203
1205
1204 # compute all statistics
1206 # compute all statistics
1205 heads_common = set(common)
1207 heads_common = set(common)
1206 heads_remote = set(hds)
1208 heads_remote = set(hds)
1207 heads_local = set(repo.heads())
1209 heads_local = set(repo.heads())
1208 # note: there cannot be a local or remote head that is in common and not
1210 # note: there cannot be a local or remote head that is in common and not
1209 # itself a head of common.
1211 # itself a head of common.
1210 heads_common_local = heads_common & heads_local
1212 heads_common_local = heads_common & heads_local
1211 heads_common_remote = heads_common & heads_remote
1213 heads_common_remote = heads_common & heads_remote
1212 heads_common_both = heads_common & heads_remote & heads_local
1214 heads_common_both = heads_common & heads_remote & heads_local
1213
1215
1214 all = repo.revs(b'all()')
1216 all = repo.revs(b'all()')
1215 common = repo.revs(b'::%ln', common)
1217 common = repo.revs(b'::%ln', common)
1216 roots_common = repo.revs(b'roots(::%ld)', common)
1218 roots_common = repo.revs(b'roots(::%ld)', common)
1217 missing = repo.revs(b'not ::%ld', common)
1219 missing = repo.revs(b'not ::%ld', common)
1218 heads_missing = repo.revs(b'heads(%ld)', missing)
1220 heads_missing = repo.revs(b'heads(%ld)', missing)
1219 roots_missing = repo.revs(b'roots(%ld)', missing)
1221 roots_missing = repo.revs(b'roots(%ld)', missing)
1220 assert len(common) + len(missing) == len(all)
1222 assert len(common) + len(missing) == len(all)
1221
1223
1222 initial_undecided = repo.revs(
1224 initial_undecided = repo.revs(
1223 b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
1225 b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
1224 )
1226 )
1225 heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
1227 heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
1226 roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
1228 roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
1227 common_initial_undecided = initial_undecided & common
1229 common_initial_undecided = initial_undecided & common
1228 missing_initial_undecided = initial_undecided & missing
1230 missing_initial_undecided = initial_undecided & missing
1229
1231
1230 data[b'elapsed'] = t.elapsed
1232 data[b'elapsed'] = t.elapsed
1231 data[b'nb-common-heads'] = len(heads_common)
1233 data[b'nb-common-heads'] = len(heads_common)
1232 data[b'nb-common-heads-local'] = len(heads_common_local)
1234 data[b'nb-common-heads-local'] = len(heads_common_local)
1233 data[b'nb-common-heads-remote'] = len(heads_common_remote)
1235 data[b'nb-common-heads-remote'] = len(heads_common_remote)
1234 data[b'nb-common-heads-both'] = len(heads_common_both)
1236 data[b'nb-common-heads-both'] = len(heads_common_both)
1235 data[b'nb-common-roots'] = len(roots_common)
1237 data[b'nb-common-roots'] = len(roots_common)
1236 data[b'nb-head-local'] = len(heads_local)
1238 data[b'nb-head-local'] = len(heads_local)
1237 data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
1239 data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
1238 data[b'nb-head-remote'] = len(heads_remote)
1240 data[b'nb-head-remote'] = len(heads_remote)
1239 data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
1241 data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
1240 heads_common_remote
1242 heads_common_remote
1241 )
1243 )
1242 data[b'nb-revs'] = len(all)
1244 data[b'nb-revs'] = len(all)
1243 data[b'nb-revs-common'] = len(common)
1245 data[b'nb-revs-common'] = len(common)
1244 data[b'nb-revs-missing'] = len(missing)
1246 data[b'nb-revs-missing'] = len(missing)
1245 data[b'nb-missing-heads'] = len(heads_missing)
1247 data[b'nb-missing-heads'] = len(heads_missing)
1246 data[b'nb-missing-roots'] = len(roots_missing)
1248 data[b'nb-missing-roots'] = len(roots_missing)
1247 data[b'nb-ini_und'] = len(initial_undecided)
1249 data[b'nb-ini_und'] = len(initial_undecided)
1248 data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
1250 data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
1249 data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
1251 data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
1250 data[b'nb-ini_und-common'] = len(common_initial_undecided)
1252 data[b'nb-ini_und-common'] = len(common_initial_undecided)
1251 data[b'nb-ini_und-missing'] = len(missing_initial_undecided)
1253 data[b'nb-ini_und-missing'] = len(missing_initial_undecided)
1252
1254
1253 fm.startitem()
1255 fm.startitem()
1254 fm.data(**pycompat.strkwargs(data))
1256 fm.data(**pycompat.strkwargs(data))
1255 # display discovery summary
1257 # display discovery summary
1256 fm.plain(b"elapsed time: %(elapsed)f seconds\n" % data)
1258 fm.plain(b"elapsed time: %(elapsed)f seconds\n" % data)
1257 fm.plain(b"round-trips: %(total-roundtrips)9d\n" % data)
1259 fm.plain(b"round-trips: %(total-roundtrips)9d\n" % data)
1258 fm.plain(b"queries: %(total-queries)9d\n" % data)
1260 fm.plain(b"queries: %(total-queries)9d\n" % data)
1259 fm.plain(b"heads summary:\n")
1261 fm.plain(b"heads summary:\n")
1260 fm.plain(b" total common heads: %(nb-common-heads)9d\n" % data)
1262 fm.plain(b" total common heads: %(nb-common-heads)9d\n" % data)
1261 fm.plain(b" also local heads: %(nb-common-heads-local)9d\n" % data)
1263 fm.plain(b" also local heads: %(nb-common-heads-local)9d\n" % data)
1262 fm.plain(b" also remote heads: %(nb-common-heads-remote)9d\n" % data)
1264 fm.plain(b" also remote heads: %(nb-common-heads-remote)9d\n" % data)
1263 fm.plain(b" both: %(nb-common-heads-both)9d\n" % data)
1265 fm.plain(b" both: %(nb-common-heads-both)9d\n" % data)
1264 fm.plain(b" local heads: %(nb-head-local)9d\n" % data)
1266 fm.plain(b" local heads: %(nb-head-local)9d\n" % data)
1265 fm.plain(b" common: %(nb-common-heads-local)9d\n" % data)
1267 fm.plain(b" common: %(nb-common-heads-local)9d\n" % data)
1266 fm.plain(b" missing: %(nb-head-local-missing)9d\n" % data)
1268 fm.plain(b" missing: %(nb-head-local-missing)9d\n" % data)
1267 fm.plain(b" remote heads: %(nb-head-remote)9d\n" % data)
1269 fm.plain(b" remote heads: %(nb-head-remote)9d\n" % data)
1268 fm.plain(b" common: %(nb-common-heads-remote)9d\n" % data)
1270 fm.plain(b" common: %(nb-common-heads-remote)9d\n" % data)
1269 fm.plain(b" unknown: %(nb-head-remote-unknown)9d\n" % data)
1271 fm.plain(b" unknown: %(nb-head-remote-unknown)9d\n" % data)
1270 fm.plain(b"local changesets: %(nb-revs)9d\n" % data)
1272 fm.plain(b"local changesets: %(nb-revs)9d\n" % data)
1271 fm.plain(b" common: %(nb-revs-common)9d\n" % data)
1273 fm.plain(b" common: %(nb-revs-common)9d\n" % data)
1272 fm.plain(b" heads: %(nb-common-heads)9d\n" % data)
1274 fm.plain(b" heads: %(nb-common-heads)9d\n" % data)
1273 fm.plain(b" roots: %(nb-common-roots)9d\n" % data)
1275 fm.plain(b" roots: %(nb-common-roots)9d\n" % data)
1274 fm.plain(b" missing: %(nb-revs-missing)9d\n" % data)
1276 fm.plain(b" missing: %(nb-revs-missing)9d\n" % data)
1275 fm.plain(b" heads: %(nb-missing-heads)9d\n" % data)
1277 fm.plain(b" heads: %(nb-missing-heads)9d\n" % data)
1276 fm.plain(b" roots: %(nb-missing-roots)9d\n" % data)
1278 fm.plain(b" roots: %(nb-missing-roots)9d\n" % data)
1277 fm.plain(b" first undecided set: %(nb-ini_und)9d\n" % data)
1279 fm.plain(b" first undecided set: %(nb-ini_und)9d\n" % data)
1278 fm.plain(b" heads: %(nb-ini_und-heads)9d\n" % data)
1280 fm.plain(b" heads: %(nb-ini_und-heads)9d\n" % data)
1279 fm.plain(b" roots: %(nb-ini_und-roots)9d\n" % data)
1281 fm.plain(b" roots: %(nb-ini_und-roots)9d\n" % data)
1280 fm.plain(b" common: %(nb-ini_und-common)9d\n" % data)
1282 fm.plain(b" common: %(nb-ini_und-common)9d\n" % data)
1281 fm.plain(b" missing: %(nb-ini_und-missing)9d\n" % data)
1283 fm.plain(b" missing: %(nb-ini_und-missing)9d\n" % data)
1282
1284
1283 if ui.verbose:
1285 if ui.verbose:
1284 fm.plain(
1286 fm.plain(
1285 b"common heads: %s\n"
1287 b"common heads: %s\n"
1286 % b" ".join(sorted(short(n) for n in heads_common))
1288 % b" ".join(sorted(short(n) for n in heads_common))
1287 )
1289 )
1288 fm.end()
1290 fm.end()
1289
1291
1290
1292
1291 _chunksize = 4 << 10
1293 _chunksize = 4 << 10
1292
1294
1293
1295
1294 @command(
1296 @command(
1295 b'debugdownload',
1297 b'debugdownload',
1296 [
1298 [
1297 (b'o', b'output', b'', _(b'path')),
1299 (b'o', b'output', b'', _(b'path')),
1298 ],
1300 ],
1299 optionalrepo=True,
1301 optionalrepo=True,
1300 )
1302 )
1301 def debugdownload(ui, repo, url, output=None, **opts):
1303 def debugdownload(ui, repo, url, output=None, **opts):
1302 """download a resource using Mercurial logic and config"""
1304 """download a resource using Mercurial logic and config"""
1303 fh = urlmod.open(ui, url, output)
1305 fh = urlmod.open(ui, url, output)
1304
1306
1305 dest = ui
1307 dest = ui
1306 if output:
1308 if output:
1307 dest = open(output, b"wb", _chunksize)
1309 dest = open(output, b"wb", _chunksize)
1308 try:
1310 try:
1309 data = fh.read(_chunksize)
1311 data = fh.read(_chunksize)
1310 while data:
1312 while data:
1311 dest.write(data)
1313 dest.write(data)
1312 data = fh.read(_chunksize)
1314 data = fh.read(_chunksize)
1313 finally:
1315 finally:
1314 if output:
1316 if output:
1315 dest.close()
1317 dest.close()
1316
1318
1317
1319
1318 @command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
1320 @command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
1319 def debugextensions(ui, repo, **opts):
1321 def debugextensions(ui, repo, **opts):
1320 '''show information about active extensions'''
1322 '''show information about active extensions'''
1321 opts = pycompat.byteskwargs(opts)
1323 opts = pycompat.byteskwargs(opts)
1322 exts = extensions.extensions(ui)
1324 exts = extensions.extensions(ui)
1323 hgver = util.version()
1325 hgver = util.version()
1324 fm = ui.formatter(b'debugextensions', opts)
1326 fm = ui.formatter(b'debugextensions', opts)
1325 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
1327 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
1326 isinternal = extensions.ismoduleinternal(extmod)
1328 isinternal = extensions.ismoduleinternal(extmod)
1327 extsource = None
1329 extsource = None
1328
1330
1329 if util.safehasattr(extmod, '__file__'):
1331 if util.safehasattr(extmod, '__file__'):
1330 extsource = pycompat.fsencode(extmod.__file__)
1332 extsource = pycompat.fsencode(extmod.__file__)
1331 elif getattr(sys, 'oxidized', False):
1333 elif getattr(sys, 'oxidized', False):
1332 extsource = pycompat.sysexecutable
1334 extsource = pycompat.sysexecutable
1333 if isinternal:
1335 if isinternal:
1334 exttestedwith = [] # never expose magic string to users
1336 exttestedwith = [] # never expose magic string to users
1335 else:
1337 else:
1336 exttestedwith = getattr(extmod, 'testedwith', b'').split()
1338 exttestedwith = getattr(extmod, 'testedwith', b'').split()
1337 extbuglink = getattr(extmod, 'buglink', None)
1339 extbuglink = getattr(extmod, 'buglink', None)
1338
1340
1339 fm.startitem()
1341 fm.startitem()
1340
1342
1341 if ui.quiet or ui.verbose:
1343 if ui.quiet or ui.verbose:
1342 fm.write(b'name', b'%s\n', extname)
1344 fm.write(b'name', b'%s\n', extname)
1343 else:
1345 else:
1344 fm.write(b'name', b'%s', extname)
1346 fm.write(b'name', b'%s', extname)
1345 if isinternal or hgver in exttestedwith:
1347 if isinternal or hgver in exttestedwith:
1346 fm.plain(b'\n')
1348 fm.plain(b'\n')
1347 elif not exttestedwith:
1349 elif not exttestedwith:
1348 fm.plain(_(b' (untested!)\n'))
1350 fm.plain(_(b' (untested!)\n'))
1349 else:
1351 else:
1350 lasttestedversion = exttestedwith[-1]
1352 lasttestedversion = exttestedwith[-1]
1351 fm.plain(b' (%s!)\n' % lasttestedversion)
1353 fm.plain(b' (%s!)\n' % lasttestedversion)
1352
1354
1353 fm.condwrite(
1355 fm.condwrite(
1354 ui.verbose and extsource,
1356 ui.verbose and extsource,
1355 b'source',
1357 b'source',
1356 _(b' location: %s\n'),
1358 _(b' location: %s\n'),
1357 extsource or b"",
1359 extsource or b"",
1358 )
1360 )
1359
1361
1360 if ui.verbose:
1362 if ui.verbose:
1361 fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
1363 fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
1362 fm.data(bundled=isinternal)
1364 fm.data(bundled=isinternal)
1363
1365
1364 fm.condwrite(
1366 fm.condwrite(
1365 ui.verbose and exttestedwith,
1367 ui.verbose and exttestedwith,
1366 b'testedwith',
1368 b'testedwith',
1367 _(b' tested with: %s\n'),
1369 _(b' tested with: %s\n'),
1368 fm.formatlist(exttestedwith, name=b'ver'),
1370 fm.formatlist(exttestedwith, name=b'ver'),
1369 )
1371 )
1370
1372
1371 fm.condwrite(
1373 fm.condwrite(
1372 ui.verbose and extbuglink,
1374 ui.verbose and extbuglink,
1373 b'buglink',
1375 b'buglink',
1374 _(b' bug reporting: %s\n'),
1376 _(b' bug reporting: %s\n'),
1375 extbuglink or b"",
1377 extbuglink or b"",
1376 )
1378 )
1377
1379
1378 fm.end()
1380 fm.end()
1379
1381
1380
1382
1381 @command(
1383 @command(
1382 b'debugfileset',
1384 b'debugfileset',
1383 [
1385 [
1384 (
1386 (
1385 b'r',
1387 b'r',
1386 b'rev',
1388 b'rev',
1387 b'',
1389 b'',
1388 _(b'apply the filespec on this revision'),
1390 _(b'apply the filespec on this revision'),
1389 _(b'REV'),
1391 _(b'REV'),
1390 ),
1392 ),
1391 (
1393 (
1392 b'',
1394 b'',
1393 b'all-files',
1395 b'all-files',
1394 False,
1396 False,
1395 _(b'test files from all revisions and working directory'),
1397 _(b'test files from all revisions and working directory'),
1396 ),
1398 ),
1397 (
1399 (
1398 b's',
1400 b's',
1399 b'show-matcher',
1401 b'show-matcher',
1400 None,
1402 None,
1401 _(b'print internal representation of matcher'),
1403 _(b'print internal representation of matcher'),
1402 ),
1404 ),
1403 (
1405 (
1404 b'p',
1406 b'p',
1405 b'show-stage',
1407 b'show-stage',
1406 [],
1408 [],
1407 _(b'print parsed tree at the given stage'),
1409 _(b'print parsed tree at the given stage'),
1408 _(b'NAME'),
1410 _(b'NAME'),
1409 ),
1411 ),
1410 ],
1412 ],
1411 _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
1413 _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
1412 )
1414 )
1413 def debugfileset(ui, repo, expr, **opts):
1415 def debugfileset(ui, repo, expr, **opts):
1414 '''parse and apply a fileset specification'''
1416 '''parse and apply a fileset specification'''
1415 from . import fileset
1417 from . import fileset
1416
1418
1417 fileset.symbols # force import of fileset so we have predicates to optimize
1419 fileset.symbols # force import of fileset so we have predicates to optimize
1418 opts = pycompat.byteskwargs(opts)
1420 opts = pycompat.byteskwargs(opts)
1419 ctx = logcmdutil.revsingle(repo, opts.get(b'rev'), None)
1421 ctx = logcmdutil.revsingle(repo, opts.get(b'rev'), None)
1420
1422
1421 stages = [
1423 stages = [
1422 (b'parsed', pycompat.identity),
1424 (b'parsed', pycompat.identity),
1423 (b'analyzed', filesetlang.analyze),
1425 (b'analyzed', filesetlang.analyze),
1424 (b'optimized', filesetlang.optimize),
1426 (b'optimized', filesetlang.optimize),
1425 ]
1427 ]
1426 stagenames = {n for n, f in stages}
1428 stagenames = {n for n, f in stages}
1427
1429
1428 showalways = set()
1430 showalways = set()
1429 if ui.verbose and not opts[b'show_stage']:
1431 if ui.verbose and not opts[b'show_stage']:
1430 # show parsed tree by --verbose (deprecated)
1432 # show parsed tree by --verbose (deprecated)
1431 showalways.add(b'parsed')
1433 showalways.add(b'parsed')
1432 if opts[b'show_stage'] == [b'all']:
1434 if opts[b'show_stage'] == [b'all']:
1433 showalways.update(stagenames)
1435 showalways.update(stagenames)
1434 else:
1436 else:
1435 for n in opts[b'show_stage']:
1437 for n in opts[b'show_stage']:
1436 if n not in stagenames:
1438 if n not in stagenames:
1437 raise error.Abort(_(b'invalid stage name: %s') % n)
1439 raise error.Abort(_(b'invalid stage name: %s') % n)
1438 showalways.update(opts[b'show_stage'])
1440 showalways.update(opts[b'show_stage'])
1439
1441
1440 tree = filesetlang.parse(expr)
1442 tree = filesetlang.parse(expr)
1441 for n, f in stages:
1443 for n, f in stages:
1442 tree = f(tree)
1444 tree = f(tree)
1443 if n in showalways:
1445 if n in showalways:
1444 if opts[b'show_stage'] or n != b'parsed':
1446 if opts[b'show_stage'] or n != b'parsed':
1445 ui.write(b"* %s:\n" % n)
1447 ui.write(b"* %s:\n" % n)
1446 ui.write(filesetlang.prettyformat(tree), b"\n")
1448 ui.write(filesetlang.prettyformat(tree), b"\n")
1447
1449
1448 files = set()
1450 files = set()
1449 if opts[b'all_files']:
1451 if opts[b'all_files']:
1450 for r in repo:
1452 for r in repo:
1451 c = repo[r]
1453 c = repo[r]
1452 files.update(c.files())
1454 files.update(c.files())
1453 files.update(c.substate)
1455 files.update(c.substate)
1454 if opts[b'all_files'] or ctx.rev() is None:
1456 if opts[b'all_files'] or ctx.rev() is None:
1455 wctx = repo[None]
1457 wctx = repo[None]
1456 files.update(
1458 files.update(
1457 repo.dirstate.walk(
1459 repo.dirstate.walk(
1458 scmutil.matchall(repo),
1460 scmutil.matchall(repo),
1459 subrepos=list(wctx.substate),
1461 subrepos=list(wctx.substate),
1460 unknown=True,
1462 unknown=True,
1461 ignored=True,
1463 ignored=True,
1462 )
1464 )
1463 )
1465 )
1464 files.update(wctx.substate)
1466 files.update(wctx.substate)
1465 else:
1467 else:
1466 files.update(ctx.files())
1468 files.update(ctx.files())
1467 files.update(ctx.substate)
1469 files.update(ctx.substate)
1468
1470
1469 m = ctx.matchfileset(repo.getcwd(), expr)
1471 m = ctx.matchfileset(repo.getcwd(), expr)
1470 if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
1472 if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
1471 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
1473 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
1472 for f in sorted(files):
1474 for f in sorted(files):
1473 if not m(f):
1475 if not m(f):
1474 continue
1476 continue
1475 ui.write(b"%s\n" % f)
1477 ui.write(b"%s\n" % f)
1476
1478
1477
1479
1478 @command(
1480 @command(
1479 b"debug-repair-issue6528",
1481 b"debug-repair-issue6528",
1480 [
1482 [
1481 (
1483 (
1482 b'',
1484 b'',
1483 b'to-report',
1485 b'to-report',
1484 b'',
1486 b'',
1485 _(b'build a report of affected revisions to this file'),
1487 _(b'build a report of affected revisions to this file'),
1486 _(b'FILE'),
1488 _(b'FILE'),
1487 ),
1489 ),
1488 (
1490 (
1489 b'',
1491 b'',
1490 b'from-report',
1492 b'from-report',
1491 b'',
1493 b'',
1492 _(b'repair revisions listed in this report file'),
1494 _(b'repair revisions listed in this report file'),
1493 _(b'FILE'),
1495 _(b'FILE'),
1494 ),
1496 ),
1495 (
1497 (
1496 b'',
1498 b'',
1497 b'paranoid',
1499 b'paranoid',
1498 False,
1500 False,
1499 _(b'check that both detection methods do the same thing'),
1501 _(b'check that both detection methods do the same thing'),
1500 ),
1502 ),
1501 ]
1503 ]
1502 + cmdutil.dryrunopts,
1504 + cmdutil.dryrunopts,
1503 )
1505 )
1504 def debug_repair_issue6528(ui, repo, **opts):
1506 def debug_repair_issue6528(ui, repo, **opts):
1505 """find affected revisions and repair them. See issue6528 for more details.
1507 """find affected revisions and repair them. See issue6528 for more details.
1506
1508
1507 The `--to-report` and `--from-report` flags allow you to cache and reuse the
1509 The `--to-report` and `--from-report` flags allow you to cache and reuse the
1508 computation of affected revisions for a given repository across clones.
1510 computation of affected revisions for a given repository across clones.
1509 The report format is line-based (with empty lines ignored):
1511 The report format is line-based (with empty lines ignored):
1510
1512
1511 ```
1513 ```
1512 <ascii-hex of the affected revision>,... <unencoded filelog index filename>
1514 <ascii-hex of the affected revision>,... <unencoded filelog index filename>
1513 ```
1515 ```
1514
1516
1515 There can be multiple broken revisions per filelog, they are separated by
1517 There can be multiple broken revisions per filelog, they are separated by
1516 a comma with no spaces. The only space is between the revision(s) and the
1518 a comma with no spaces. The only space is between the revision(s) and the
1517 filename.
1519 filename.
1518
1520
1519 Note that this does *not* mean that this repairs future affected revisions;
1521 Note that this does *not* mean that this repairs future affected revisions;
1520 those need a separate fix at the exchange level, which was introduced in
1522 those need a separate fix at the exchange level, which was introduced in
1521 Mercurial 5.9.1.
1523 Mercurial 5.9.1.
1522
1524
1523 There is a `--paranoid` flag to test that the fast implementation is correct
1525 There is a `--paranoid` flag to test that the fast implementation is correct
1524 by checking it against the slow implementation. Since this matter is quite
1526 by checking it against the slow implementation. Since this matter is quite
1525 urgent and testing every edge-case is probably quite costly, we use this
1527 urgent and testing every edge-case is probably quite costly, we use this
1526 method to test on large repositories as a fuzzing method of sorts.
1528 method to test on large repositories as a fuzzing method of sorts.
1527 """
1529 """
1528 cmdutil.check_incompatible_arguments(
1530 cmdutil.check_incompatible_arguments(
1529 opts, 'to_report', ['from_report', 'dry_run']
1531 opts, 'to_report', ['from_report', 'dry_run']
1530 )
1532 )
1531 dry_run = opts.get('dry_run')
1533 dry_run = opts.get('dry_run')
1532 to_report = opts.get('to_report')
1534 to_report = opts.get('to_report')
1533 from_report = opts.get('from_report')
1535 from_report = opts.get('from_report')
1534 paranoid = opts.get('paranoid')
1536 paranoid = opts.get('paranoid')
1535 # TODO maybe add filelog pattern and revision pattern parameters to help
1537 # TODO maybe add filelog pattern and revision pattern parameters to help
1536 # narrow down the search for users that know what they're looking for?
1538 # narrow down the search for users that know what they're looking for?
1537
1539
1538 if requirements.REVLOGV1_REQUIREMENT not in repo.requirements:
1540 if requirements.REVLOGV1_REQUIREMENT not in repo.requirements:
1539 msg = b"can only repair revlogv1 repositories, v2 is not affected"
1541 msg = b"can only repair revlogv1 repositories, v2 is not affected"
1540 raise error.Abort(_(msg))
1542 raise error.Abort(_(msg))
1541
1543
1542 rewrite.repair_issue6528(
1544 rewrite.repair_issue6528(
1543 ui,
1545 ui,
1544 repo,
1546 repo,
1545 dry_run=dry_run,
1547 dry_run=dry_run,
1546 to_report=to_report,
1548 to_report=to_report,
1547 from_report=from_report,
1549 from_report=from_report,
1548 paranoid=paranoid,
1550 paranoid=paranoid,
1549 )
1551 )
1550
1552
1551
1553
1552 @command(b'debugformat', [] + cmdutil.formatteropts)
1554 @command(b'debugformat', [] + cmdutil.formatteropts)
1553 def debugformat(ui, repo, **opts):
1555 def debugformat(ui, repo, **opts):
1554 """display format information about the current repository
1556 """display format information about the current repository
1555
1557
1556 Use --verbose to get extra information about the current config value and
1558 Use --verbose to get extra information about the current config value and
1557 the Mercurial default."""
1559 the Mercurial default."""
1558 opts = pycompat.byteskwargs(opts)
1560 opts = pycompat.byteskwargs(opts)
1559 maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
1561 maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
1560 maxvariantlength = max(len(b'format-variant'), maxvariantlength)
1562 maxvariantlength = max(len(b'format-variant'), maxvariantlength)
1561
1563
1562 def makeformatname(name):
1564 def makeformatname(name):
1563 return b'%s:' + (b' ' * (maxvariantlength - len(name)))
1565 return b'%s:' + (b' ' * (maxvariantlength - len(name)))
1564
1566
1565 fm = ui.formatter(b'debugformat', opts)
1567 fm = ui.formatter(b'debugformat', opts)
1566 if fm.isplain():
1568 if fm.isplain():
1567
1569
1568 def formatvalue(value):
1570 def formatvalue(value):
1569 if util.safehasattr(value, b'startswith'):
1571 if util.safehasattr(value, b'startswith'):
1570 return value
1572 return value
1571 if value:
1573 if value:
1572 return b'yes'
1574 return b'yes'
1573 else:
1575 else:
1574 return b'no'
1576 return b'no'
1575
1577
1576 else:
1578 else:
1577 formatvalue = pycompat.identity
1579 formatvalue = pycompat.identity
1578
1580
1579 fm.plain(b'format-variant')
1581 fm.plain(b'format-variant')
1580 fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
1582 fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
1581 fm.plain(b' repo')
1583 fm.plain(b' repo')
1582 if ui.verbose:
1584 if ui.verbose:
1583 fm.plain(b' config default')
1585 fm.plain(b' config default')
1584 fm.plain(b'\n')
1586 fm.plain(b'\n')
1585 for fv in upgrade.allformatvariant:
1587 for fv in upgrade.allformatvariant:
1586 fm.startitem()
1588 fm.startitem()
1587 repovalue = fv.fromrepo(repo)
1589 repovalue = fv.fromrepo(repo)
1588 configvalue = fv.fromconfig(repo)
1590 configvalue = fv.fromconfig(repo)
1589
1591
1590 if repovalue != configvalue:
1592 if repovalue != configvalue:
1591 namelabel = b'formatvariant.name.mismatchconfig'
1593 namelabel = b'formatvariant.name.mismatchconfig'
1592 repolabel = b'formatvariant.repo.mismatchconfig'
1594 repolabel = b'formatvariant.repo.mismatchconfig'
1593 elif repovalue != fv.default:
1595 elif repovalue != fv.default:
1594 namelabel = b'formatvariant.name.mismatchdefault'
1596 namelabel = b'formatvariant.name.mismatchdefault'
1595 repolabel = b'formatvariant.repo.mismatchdefault'
1597 repolabel = b'formatvariant.repo.mismatchdefault'
1596 else:
1598 else:
1597 namelabel = b'formatvariant.name.uptodate'
1599 namelabel = b'formatvariant.name.uptodate'
1598 repolabel = b'formatvariant.repo.uptodate'
1600 repolabel = b'formatvariant.repo.uptodate'
1599
1601
1600 fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
1602 fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
1601 fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
1603 fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
1602 if fv.default != configvalue:
1604 if fv.default != configvalue:
1603 configlabel = b'formatvariant.config.special'
1605 configlabel = b'formatvariant.config.special'
1604 else:
1606 else:
1605 configlabel = b'formatvariant.config.default'
1607 configlabel = b'formatvariant.config.default'
1606 fm.condwrite(
1608 fm.condwrite(
1607 ui.verbose,
1609 ui.verbose,
1608 b'config',
1610 b'config',
1609 b' %6s',
1611 b' %6s',
1610 formatvalue(configvalue),
1612 formatvalue(configvalue),
1611 label=configlabel,
1613 label=configlabel,
1612 )
1614 )
1613 fm.condwrite(
1615 fm.condwrite(
1614 ui.verbose,
1616 ui.verbose,
1615 b'default',
1617 b'default',
1616 b' %7s',
1618 b' %7s',
1617 formatvalue(fv.default),
1619 formatvalue(fv.default),
1618 label=b'formatvariant.default',
1620 label=b'formatvariant.default',
1619 )
1621 )
1620 fm.plain(b'\n')
1622 fm.plain(b'\n')
1621 fm.end()
1623 fm.end()
1622
1624
1623
1625
1624 @command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
1626 @command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
1625 def debugfsinfo(ui, path=b"."):
1627 def debugfsinfo(ui, path=b"."):
1626 """show information detected about current filesystem"""
1628 """show information detected about current filesystem"""
1627 ui.writenoi18n(b'path: %s\n' % path)
1629 ui.writenoi18n(b'path: %s\n' % path)
1628 ui.writenoi18n(
1630 ui.writenoi18n(
1629 b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
1631 b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
1630 )
1632 )
1631 ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
1633 ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
1632 ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
1634 ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
1633 ui.writenoi18n(
1635 ui.writenoi18n(
1634 b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
1636 b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
1635 )
1637 )
1636 ui.writenoi18n(
1638 ui.writenoi18n(
1637 b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
1639 b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
1638 )
1640 )
1639 casesensitive = b'(unknown)'
1641 casesensitive = b'(unknown)'
1640 try:
1642 try:
1641 with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
1643 with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
1642 casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
1644 casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
1643 except OSError:
1645 except OSError:
1644 pass
1646 pass
1645 ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1647 ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1646
1648
1647
1649
1648 @command(
1650 @command(
1649 b'debuggetbundle',
1651 b'debuggetbundle',
1650 [
1652 [
1651 (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
1653 (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
1652 (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
1654 (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
1653 (
1655 (
1654 b't',
1656 b't',
1655 b'type',
1657 b'type',
1656 b'bzip2',
1658 b'bzip2',
1657 _(b'bundle compression type to use'),
1659 _(b'bundle compression type to use'),
1658 _(b'TYPE'),
1660 _(b'TYPE'),
1659 ),
1661 ),
1660 ],
1662 ],
1661 _(b'REPO FILE [-H|-C ID]...'),
1663 _(b'REPO FILE [-H|-C ID]...'),
1662 norepo=True,
1664 norepo=True,
1663 )
1665 )
1664 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
1666 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
1665 """retrieves a bundle from a repo
1667 """retrieves a bundle from a repo
1666
1668
1667 Every ID must be a full-length hex node id string. Saves the bundle to the
1669 Every ID must be a full-length hex node id string. Saves the bundle to the
1668 given file.
1670 given file.
1669 """
1671 """
1670 opts = pycompat.byteskwargs(opts)
1672 opts = pycompat.byteskwargs(opts)
1671 repo = hg.peer(ui, opts, repopath)
1673 repo = hg.peer(ui, opts, repopath)
1672 if not repo.capable(b'getbundle'):
1674 if not repo.capable(b'getbundle'):
1673 raise error.Abort(b"getbundle() not supported by target repository")
1675 raise error.Abort(b"getbundle() not supported by target repository")
1674 args = {}
1676 args = {}
1675 if common:
1677 if common:
1676 args['common'] = [bin(s) for s in common]
1678 args['common'] = [bin(s) for s in common]
1677 if head:
1679 if head:
1678 args['heads'] = [bin(s) for s in head]
1680 args['heads'] = [bin(s) for s in head]
1679 # TODO: get desired bundlecaps from command line.
1681 # TODO: get desired bundlecaps from command line.
1680 args['bundlecaps'] = None
1682 args['bundlecaps'] = None
1681 bundle = repo.getbundle(b'debug', **args)
1683 bundle = repo.getbundle(b'debug', **args)
1682
1684
1683 bundletype = opts.get(b'type', b'bzip2').lower()
1685 bundletype = opts.get(b'type', b'bzip2').lower()
1684 btypes = {
1686 btypes = {
1685 b'none': b'HG10UN',
1687 b'none': b'HG10UN',
1686 b'bzip2': b'HG10BZ',
1688 b'bzip2': b'HG10BZ',
1687 b'gzip': b'HG10GZ',
1689 b'gzip': b'HG10GZ',
1688 b'bundle2': b'HG20',
1690 b'bundle2': b'HG20',
1689 }
1691 }
1690 bundletype = btypes.get(bundletype)
1692 bundletype = btypes.get(bundletype)
1691 if bundletype not in bundle2.bundletypes:
1693 if bundletype not in bundle2.bundletypes:
1692 raise error.Abort(_(b'unknown bundle type specified with --type'))
1694 raise error.Abort(_(b'unknown bundle type specified with --type'))
1693 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1695 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1694
1696
1695
1697
1696 @command(b'debugignore', [], b'[FILE]')
1698 @command(b'debugignore', [], b'[FILE]')
1697 def debugignore(ui, repo, *files, **opts):
1699 def debugignore(ui, repo, *files, **opts):
1698 """display the combined ignore pattern and information about ignored files
1700 """display the combined ignore pattern and information about ignored files
1699
1701
1700 With no argument display the combined ignore pattern.
1702 With no argument display the combined ignore pattern.
1701
1703
1702 Given space-separated file names, show whether each given file is ignored
1704 Given space-separated file names, show whether each given file is ignored
1703 and, if so, the ignore rule (file and line number) that matched it.
1705 and, if so, the ignore rule (file and line number) that matched it.
1704 """
1706 """
1705 ignore = repo.dirstate._ignore
1707 ignore = repo.dirstate._ignore
1706 if not files:
1708 if not files:
1707 # Show all the patterns
1709 # Show all the patterns
1708 ui.write(b"%s\n" % pycompat.byterepr(ignore))
1710 ui.write(b"%s\n" % pycompat.byterepr(ignore))
1709 else:
1711 else:
1710 m = scmutil.match(repo[None], pats=files)
1712 m = scmutil.match(repo[None], pats=files)
1711 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1713 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1712 for f in m.files():
1714 for f in m.files():
1713 nf = util.normpath(f)
1715 nf = util.normpath(f)
1714 ignored = None
1716 ignored = None
1715 ignoredata = None
1717 ignoredata = None
1716 if nf != b'.':
1718 if nf != b'.':
1717 if ignore(nf):
1719 if ignore(nf):
1718 ignored = nf
1720 ignored = nf
1719 ignoredata = repo.dirstate._ignorefileandline(nf)
1721 ignoredata = repo.dirstate._ignorefileandline(nf)
1720 else:
1722 else:
1721 for p in pathutil.finddirs(nf):
1723 for p in pathutil.finddirs(nf):
1722 if ignore(p):
1724 if ignore(p):
1723 ignored = p
1725 ignored = p
1724 ignoredata = repo.dirstate._ignorefileandline(p)
1726 ignoredata = repo.dirstate._ignorefileandline(p)
1725 break
1727 break
1726 if ignored:
1728 if ignored:
1727 if ignored == nf:
1729 if ignored == nf:
1728 ui.write(_(b"%s is ignored\n") % uipathfn(f))
1730 ui.write(_(b"%s is ignored\n") % uipathfn(f))
1729 else:
1731 else:
1730 ui.write(
1732 ui.write(
1731 _(
1733 _(
1732 b"%s is ignored because of "
1734 b"%s is ignored because of "
1733 b"containing directory %s\n"
1735 b"containing directory %s\n"
1734 )
1736 )
1735 % (uipathfn(f), ignored)
1737 % (uipathfn(f), ignored)
1736 )
1738 )
1737 ignorefile, lineno, line = ignoredata
1739 ignorefile, lineno, line = ignoredata
1738 ui.write(
1740 ui.write(
1739 _(b"(ignore rule in %s, line %d: '%s')\n")
1741 _(b"(ignore rule in %s, line %d: '%s')\n")
1740 % (ignorefile, lineno, line)
1742 % (ignorefile, lineno, line)
1741 )
1743 )
1742 else:
1744 else:
1743 ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1745 ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1744
1746
1745
1747
1746 @command(
1748 @command(
1747 b'debugindex',
1749 b'debugindex',
1748 cmdutil.debugrevlogopts + cmdutil.formatteropts,
1750 cmdutil.debugrevlogopts + cmdutil.formatteropts,
1749 _(b'-c|-m|FILE'),
1751 _(b'-c|-m|FILE'),
1750 )
1752 )
1751 def debugindex(ui, repo, file_=None, **opts):
1753 def debugindex(ui, repo, file_=None, **opts):
1752 """dump index data for a storage primitive"""
1754 """dump index data for a storage primitive"""
1753 opts = pycompat.byteskwargs(opts)
1755 opts = pycompat.byteskwargs(opts)
1754 store = cmdutil.openstorage(repo, b'debugindex', file_, opts)
1756 store = cmdutil.openstorage(repo, b'debugindex', file_, opts)
1755
1757
1756 if ui.debugflag:
1758 if ui.debugflag:
1757 shortfn = hex
1759 shortfn = hex
1758 else:
1760 else:
1759 shortfn = short
1761 shortfn = short
1760
1762
1761 idlen = 12
1763 idlen = 12
1762 for i in store:
1764 for i in store:
1763 idlen = len(shortfn(store.node(i)))
1765 idlen = len(shortfn(store.node(i)))
1764 break
1766 break
1765
1767
1766 fm = ui.formatter(b'debugindex', opts)
1768 fm = ui.formatter(b'debugindex', opts)
1767 fm.plain(
1769 fm.plain(
1768 b' rev linkrev %s %s p2\n'
1770 b' rev linkrev %s %s p2\n'
1769 % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
1771 % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
1770 )
1772 )
1771
1773
1772 for rev in store:
1774 for rev in store:
1773 node = store.node(rev)
1775 node = store.node(rev)
1774 parents = store.parents(node)
1776 parents = store.parents(node)
1775
1777
1776 fm.startitem()
1778 fm.startitem()
1777 fm.write(b'rev', b'%6d ', rev)
1779 fm.write(b'rev', b'%6d ', rev)
1778 fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
1780 fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
1779 fm.write(b'node', b'%s ', shortfn(node))
1781 fm.write(b'node', b'%s ', shortfn(node))
1780 fm.write(b'p1', b'%s ', shortfn(parents[0]))
1782 fm.write(b'p1', b'%s ', shortfn(parents[0]))
1781 fm.write(b'p2', b'%s', shortfn(parents[1]))
1783 fm.write(b'p2', b'%s', shortfn(parents[1]))
1782 fm.plain(b'\n')
1784 fm.plain(b'\n')
1783
1785
1784 fm.end()
1786 fm.end()
1785
1787
1786
1788
1787 @command(
1789 @command(
1788 b'debugindexdot',
1790 b'debugindexdot',
1789 cmdutil.debugrevlogopts,
1791 cmdutil.debugrevlogopts,
1790 _(b'-c|-m|FILE'),
1792 _(b'-c|-m|FILE'),
1791 optionalrepo=True,
1793 optionalrepo=True,
1792 )
1794 )
1793 def debugindexdot(ui, repo, file_=None, **opts):
1795 def debugindexdot(ui, repo, file_=None, **opts):
1794 """dump an index DAG as a graphviz dot file"""
1796 """dump an index DAG as a graphviz dot file"""
1795 opts = pycompat.byteskwargs(opts)
1797 opts = pycompat.byteskwargs(opts)
1796 r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
1798 r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
1797 ui.writenoi18n(b"digraph G {\n")
1799 ui.writenoi18n(b"digraph G {\n")
1798 for i in r:
1800 for i in r:
1799 node = r.node(i)
1801 node = r.node(i)
1800 pp = r.parents(node)
1802 pp = r.parents(node)
1801 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
1803 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
1802 if pp[1] != repo.nullid:
1804 if pp[1] != repo.nullid:
1803 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
1805 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
1804 ui.write(b"}\n")
1806 ui.write(b"}\n")
1805
1807
1806
1808
1807 @command(b'debugindexstats', [])
1809 @command(b'debugindexstats', [])
1808 def debugindexstats(ui, repo):
1810 def debugindexstats(ui, repo):
1809 """show stats related to the changelog index"""
1811 """show stats related to the changelog index"""
1810 repo.changelog.shortest(repo.nullid, 1)
1812 repo.changelog.shortest(repo.nullid, 1)
1811 index = repo.changelog.index
1813 index = repo.changelog.index
1812 if not util.safehasattr(index, b'stats'):
1814 if not util.safehasattr(index, b'stats'):
1813 raise error.Abort(_(b'debugindexstats only works with native code'))
1815 raise error.Abort(_(b'debugindexstats only works with native code'))
1814 for k, v in sorted(index.stats().items()):
1816 for k, v in sorted(index.stats().items()):
1815 ui.write(b'%s: %d\n' % (k, v))
1817 ui.write(b'%s: %d\n' % (k, v))
1816
1818
1817
1819
1818 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1820 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1819 def debuginstall(ui, **opts):
1821 def debuginstall(ui, **opts):
1820 """test Mercurial installation
1822 """test Mercurial installation
1821
1823
1822 Returns 0 on success.
1824 Returns 0 on success.
1823 """
1825 """
1824 opts = pycompat.byteskwargs(opts)
1826 opts = pycompat.byteskwargs(opts)
1825
1827
1826 problems = 0
1828 problems = 0
1827
1829
1828 fm = ui.formatter(b'debuginstall', opts)
1830 fm = ui.formatter(b'debuginstall', opts)
1829 fm.startitem()
1831 fm.startitem()
1830
1832
1831 # encoding might be unknown or wrong. don't translate these messages.
1833 # encoding might be unknown or wrong. don't translate these messages.
1832 fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
1834 fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
1833 err = None
1835 err = None
1834 try:
1836 try:
1835 codecs.lookup(pycompat.sysstr(encoding.encoding))
1837 codecs.lookup(pycompat.sysstr(encoding.encoding))
1836 except LookupError as inst:
1838 except LookupError as inst:
1837 err = stringutil.forcebytestr(inst)
1839 err = stringutil.forcebytestr(inst)
1838 problems += 1
1840 problems += 1
1839 fm.condwrite(
1841 fm.condwrite(
1840 err,
1842 err,
1841 b'encodingerror',
1843 b'encodingerror',
1842 b" %s\n (check that your locale is properly set)\n",
1844 b" %s\n (check that your locale is properly set)\n",
1843 err,
1845 err,
1844 )
1846 )
1845
1847
1846 # Python
1848 # Python
1847 pythonlib = None
1849 pythonlib = None
1848 if util.safehasattr(os, '__file__'):
1850 if util.safehasattr(os, '__file__'):
1849 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1851 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1850 elif getattr(sys, 'oxidized', False):
1852 elif getattr(sys, 'oxidized', False):
1851 pythonlib = pycompat.sysexecutable
1853 pythonlib = pycompat.sysexecutable
1852
1854
1853 fm.write(
1855 fm.write(
1854 b'pythonexe',
1856 b'pythonexe',
1855 _(b"checking Python executable (%s)\n"),
1857 _(b"checking Python executable (%s)\n"),
1856 pycompat.sysexecutable or _(b"unknown"),
1858 pycompat.sysexecutable or _(b"unknown"),
1857 )
1859 )
1858 fm.write(
1860 fm.write(
1859 b'pythonimplementation',
1861 b'pythonimplementation',
1860 _(b"checking Python implementation (%s)\n"),
1862 _(b"checking Python implementation (%s)\n"),
1861 pycompat.sysbytes(platform.python_implementation()),
1863 pycompat.sysbytes(platform.python_implementation()),
1862 )
1864 )
1863 fm.write(
1865 fm.write(
1864 b'pythonver',
1866 b'pythonver',
1865 _(b"checking Python version (%s)\n"),
1867 _(b"checking Python version (%s)\n"),
1866 (b"%d.%d.%d" % sys.version_info[:3]),
1868 (b"%d.%d.%d" % sys.version_info[:3]),
1867 )
1869 )
1868 fm.write(
1870 fm.write(
1869 b'pythonlib',
1871 b'pythonlib',
1870 _(b"checking Python lib (%s)...\n"),
1872 _(b"checking Python lib (%s)...\n"),
1871 pythonlib or _(b"unknown"),
1873 pythonlib or _(b"unknown"),
1872 )
1874 )
1873
1875
1874 try:
1876 try:
1875 from . import rustext # pytype: disable=import-error
1877 from . import rustext # pytype: disable=import-error
1876
1878
1877 rustext.__doc__ # trigger lazy import
1879 rustext.__doc__ # trigger lazy import
1878 except ImportError:
1880 except ImportError:
1879 rustext = None
1881 rustext = None
1880
1882
1881 security = set(sslutil.supportedprotocols)
1883 security = set(sslutil.supportedprotocols)
1882 if sslutil.hassni:
1884 if sslutil.hassni:
1883 security.add(b'sni')
1885 security.add(b'sni')

    fm.write(
        b'pythonsecurity',
        _(b"checking Python security support (%s)\n"),
        fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
    )

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if b'tls1.2' not in security:
        fm.plain(
            _(
                b' TLS 1.2 not supported by Python install; '
                b'network connections lack modern security\n'
            )
        )
    if b'sni' not in security:
        fm.plain(
            _(
                b' SNI not supported by Python install; may have '
                b'connectivity issues with some servers\n'
            )
        )

    fm.plain(
        _(
            b"checking Rust extensions (%s)\n"
            % (b'missing' if rustext is None else b'installed')
        ),
    )

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write(
        b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
    )
    fm.write(
        b'hgverextra',
        _(b"checking Mercurial custom build (%s)\n"),
        b'+'.join(hgver.split(b'+')[1:]),
    )

    # compiled modules
    hgmodules = None
    if util.safehasattr(sys.modules[__name__], '__file__'):
        hgmodules = os.path.dirname(pycompat.fsencode(__file__))
    elif getattr(sys, 'oxidized', False):
        hgmodules = pycompat.sysexecutable

    fm.write(
        b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
    )
    fm.write(
        b'hgmodules',
        _(b"checking installed modules (%s)...\n"),
        hgmodules or _(b"unknown"),
    )

    rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
    rustext = rustandc  # for now, that's the only case
    cext = policy.policy in (b'c', b'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        err = None
        try:
            if cext:
                from .cext import (  # pytype: disable=import-error
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )

                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (  # pytype: disable=import-error
                    ancestor,
                    dirstate,
                )

                dir(ancestor), dir(dirstate)  # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, b'extensionserror', b" %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write(
        b'compengines',
        _(b'checking registered compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    fm.write(
        b'compenginesavail',
        _(b'checking available compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines if e.available()),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE
    )
    fm.write(
        b'compenginesserver',
        _(
            b'checking available compression engines '
            b'for wire protocol (%s)\n'
        ),
        fm.formatlist(
            [e.name() for e in wirecompengines if e.wireprotosupport()],
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    re2 = b'missing'
    if util._re2:
        re2 = b'available'
    fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatedir()
    fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
    fm.condwrite(not p, b'', _(b" no template directories found\n"))
    if p:
        (m, fp) = templater.try_open_template(b"map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
        else:
            p = None
        fm.condwrite(
            p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
        )
        fm.condwrite(
            not m,
            b'defaulttemplatenotfound',
            _(b" template '%s' not found\n"),
            b"default",
        )
    if not p:
        problems += 1
    fm.condwrite(
        not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
    )

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    fm.condwrite(
        not cmdpath and editor == b'vi',
        b'vinotfound',
        _(
            b" No commit editor set and can't find %s in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editor == b'vi' and editorbin,
    )
    fm.condwrite(
        not cmdpath and editor != b'vi',
        b'editornotfound',
        _(
            b" Can't find editor '%s' in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editorbin,
    )
    if not cmdpath and editor != b'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e.message
        problems += 1

    fm.condwrite(
        username, b'username', _(b"checking username (%s)\n"), username
    )
    fm.condwrite(
        err,
        b'usernameerror',
        _(
            b"checking username...\n %s\n"
            b" (specify a username in your configuration file)\n"
        ),
        err,
    )

    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, b'', _(b"no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(
        problems,
        b'problems',
        _(b"%d problems detected, please check your install!\n"),
        problems,
    )
    fm.end()

    return problems


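# Illustrative sketch (not part of upstream Mercurial): the tail of a healthy
# `hg debuginstall` run, reconstructed from the format strings above; the
# username shown is a hypothetical placeholder.
#
#   checking username (Alice <alice@example.com>)
#   no problems detected
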
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))


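# Illustrative sketch (not part of upstream Mercurial): querying two node ids
# against a peer; the URL and node ids are hypothetical placeholders, and the
# output is one "1" (known) or "0" (unknown) character per id, as written above.
#
#   $ hg debugknown ssh://hg.example.com/repo <40-hex-node-A> <40-hex-node-B>
#   10
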
@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)


@command(
    b'debuglocks',
    [
        (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-free-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so they should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    if opts.get('force_free_lock'):
        repo.svfs.tryunlink(b'lock')
    if opts.get('force_free_wlock'):
        repo.vfs.tryunlink(b'wlock')
    if opts.get('force_free_lock') or opts.get('force_free_wlock'):
        return 0

    locks = []
    try:
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            try:
                if ui.interactive():
                    prompt = _(b"ready to release the lock (y)? $$ &Yes")
                    ui.promptchoice(prompt)
                else:
                    msg = b"%d locks held, waiting for signal\n"
                    msg %= len(locks)
                    ui.status(msg)
                    while True:  # XXX wait for a signal
                        time.sleep(0.1)
            except KeyboardInterrupt:
                msg = b"signal-received releasing locks\n"
                ui.status(msg)
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if b":" in locker:
                    host, pid = locker.split(b':')
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    return held


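# Illustrative sketch (not part of upstream Mercurial): sample `hg debuglocks`
# output following the "%-6s %s (%ds)" / "%-6s free" formats used in report()
# above; the user, pid, host and age values are hypothetical.
#
#   lock:  user alice, process 12345, host build01.example.com (873s)
#   wlock: free
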
@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _(
                b"Current revlog implementation doesn't appear to have a "
                b"manifest fulltext cache\n"
            )
            raise error.Abort(msg)

    if opts.get('clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
            return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(
                        bytes(e), hint=b"Check your manifest node id"
                    )
                manifest.read()  # stores revision in cache too
            return

    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek so we do not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )


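# Illustrative sketch (not part of upstream Mercurial): typical invocations of
# the command defined above; the node id is a hypothetical placeholder.
#
#   $ hg debugmanifestfulltextcache                 # list cached entries
#   $ hg debugmanifestfulltextcache --clear         # drop the persisted cache
#   $ hg debugmanifestfulltextcache --add <node-id> # pre-populate one entry
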
@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
def debugmergestate(ui, repo, *args, **opts):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    if ui.verbose:
        ms = mergestatemod.mergestate(repo)

        # sort so that reasonable information is on top
        v1records = ms._readrecordsv1()
        v2records = ms._readrecordsv2()

        if not v1records and not v2records:
            pass
        elif not v2records:
            ui.writenoi18n(b'no version 2 merge state\n')
        elif ms._v1v2match(v1records, v2records):
            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
        else:
            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')

    opts = pycompat.byteskwargs(opts)
    if not opts[b'template']:
        opts[b'template'] = (
            b'{if(commits, "", "no merge state found\n")}'
            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
            b'{files % "file: {path} (state \\"{state}\\")\n'
            b'{if(local_path, "'
            b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
            b' ancestor path: {ancestor_path} (node {ancestor_node})\n'
            b' other path: {other_path} (node {other_node})\n'
            b'")}'
            b'{if(rename_side, "'
            b' rename side: {rename_side}\n'
            b' renamed path: {renamed_path}\n'
            b'")}'
            b'{extras % " extra: {key} = {value}\n"}'
            b'"}'
            b'{extras % "extra: {file} ({key} = {value})\n"}'
        )

    ms = mergestatemod.mergestate.read(repo)

    fm = ui.formatter(b'debugmergestate', opts)
    fm.startitem()

    fm_commits = fm.nested(b'commits')
    if ms.active():
        for name, node, label_index in (
            (b'local', ms.local, 0),
            (b'other', ms.other, 1),
        ):
            fm_commits.startitem()
            fm_commits.data(name=name)
            fm_commits.data(node=hex(node))
            if ms._labels and len(ms._labels) > label_index:
                fm_commits.data(label=ms._labels[label_index])
    fm_commits.end()

    fm_files = fm.nested(b'files')
    if ms.active():
        for f in ms:
            fm_files.startitem()
            fm_files.data(path=f)
            state = ms._state[f]
            fm_files.data(state=state[0])
            if state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED,
                mergestatemod.MERGE_RECORD_RESOLVED,
            ):
                fm_files.data(local_key=state[1])
                fm_files.data(local_path=state[2])
                fm_files.data(ancestor_path=state[3])
                fm_files.data(ancestor_node=state[4])
                fm_files.data(other_path=state[5])
                fm_files.data(other_node=state[6])
                fm_files.data(local_flags=state[7])
            elif state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
                mergestatemod.MERGE_RECORD_RESOLVED_PATH,
            ):
                fm_files.data(renamed_path=state[1])
                fm_files.data(rename_side=state[2])
            fm_extras = fm_files.nested(b'extras')
            for k, v in sorted(ms.extras(f).items()):
                fm_extras.startitem()
                fm_extras.data(key=k)
                fm_extras.data(value=v)
            fm_extras.end()

    fm_files.end()

    fm_extras = fm.nested(b'extras')
    for f, d in sorted(ms.allextras().items()):
        if f in ms:
            # If file is in mergestate, we have already processed its extras
            continue
        for k, v in d.items():
            fm_extras.startitem()
            fm_extras.data(file=f)
            fm_extras.data(key=k)
            fm_extras.data(value=v)
    fm_extras.end()

    fm.end()


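# Illustrative sketch (not part of upstream Mercurial): because templateopts
# are accepted, the merge state can also be dumped in a machine-readable form
# with the standard template selector:
#
#   $ hg debugmergestate -T json
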
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in repo.names.items():
        if name != b'branches':
            names.update(ns.listnames(repo))
    names.update(
        tag
        for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
        if not closed
    )
    completions = set()
    if not args:
        args = [b'']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write(b'\n'.join(sorted(completions)))
    ui.write(b'\n')


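# Illustrative sketch (not part of upstream Mercurial): completing names that
# start with a given prefix; the tag/bookmark names shown are hypothetical.
#
#   $ hg debugnamecomplete rel
#   release-1.0
#   release-1.1
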
@command(
    b'debugnodemap',
    [
        (
            b'',
            b'dump-new',
            False,
            _(b'write a (new) persistent binary nodemap on stdout'),
        ),
        (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
        (
            b'',
            b'check',
            False,
            _(b'check that the data on disk are correct.'),
        ),
        (
            b'',
            b'metadata',
            False,
            _(b'display the on-disk metadata for the nodemap'),
        ),
    ],
)
def debugnodemap(ui, repo, **opts):
    """write and inspect the on-disk nodemap"""
    if opts['dump_new']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        if util.safehasattr(cl.index, "nodemap_data_all"):
            data = cl.index.nodemap_data_all()
        else:
            data = nodemap.persistent_data(cl.index)
        ui.write(data)
    elif opts['dump_disk']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write(data[:])
    elif opts['check']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            return nodemap.check_data(ui, cl.index, data)
    elif opts['metadata']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write((b"uid: %s\n") % docket.uid)
            ui.write((b"tip-rev: %d\n") % docket.tip_rev)
            ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
            ui.write((b"data-length: %d\n") % docket.data_length)
            ui.write((b"data-unused: %d\n") % docket.data_unused)
            unused_perc = docket.data_unused * 100.0 / docket.data_length
            ui.write((b"data-unused: %2.3f%%\n") % unused_perc)


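# Illustrative sketch (not part of upstream Mercurial): the --metadata branch
# above prints one "key: value" line per docket field, along these lines
# (all values hypothetical):
#
#   uid: <16-hex-uid>
#   tip-rev: 5280
#   tip-node: <40-hex-node>
#   data-length: 121088
#   data-unused: 0
#   data-unused: 0.000%
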
@command(
    b'debugobsolete',
    [
        (b'', b'flags', 0, _(b'markers flag')),
        (
            b'',
            b'record-parents',
            False,
            _(b'record parent information for the precursor'),
        ),
        (b'r', b'rev', [], _(b'display markers relevant to REV')),
        (
            b'',
            b'exclusive',
            False,
            _(b'restrict display to markers only relevant to REV'),
        ),
        (b'', b'index', False, _(b'display index of the marker')),
        (b'', b'delete', [], _(b'delete markers specified by indices')),
    ]
    + cmdutil.commitopts2
    + cmdutil.formatteropts,
    _(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != repo.nodeconstants.nodelen:
                raise TypeError()
            return n
        except TypeError:
            raise error.InputError(
                b'changeset references must be full hexadecimal '
                b'node identifiers'
            )

    if opts.get(b'delete'):
        indices = []
        for v in opts.get(b'delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.InputError(
                    _(b'invalid index value: %r') % v,
                    hint=_(b'use integers for indices'),
                )

        if repo.currenttransaction():
            raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of transaction.')
            )

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_(b'deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        if opts[b'rev']:
            raise error.InputError(
                b'cannot select revision when creating marker'
            )
        metadata = {}
        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction(b'debugobsolete')
            try:
                date = opts.get(b'date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts[b'record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort(
                            b'cannot use --record-parents on '
                            b'unknown changesets'
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts[b'flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % stringutil.forcebytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        if opts[b'rev']:
            revs = logcmdutil.revrange(repo, opts[b'rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts[b'exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get(b'rev') and opts.get(b'index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get(b'index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()


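# Illustrative sketch (not part of upstream Mercurial): creating a marker that
# obsoletes one changeset in favour of another, then listing all markers; both
# node ids are hypothetical full-length hashes.
#
#   $ hg debugobsolete <precursor-node> <successor-node> -d '0 0'
#   $ hg debugobsolete
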
@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p1copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p2copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


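# Illustrative sketch (not part of upstream Mercurial): both commands above
# print one "source -> destination" line per copy recorded against the chosen
# parent; the file names are hypothetical.
#
#   $ hg debugp1copies -r .
#   old.txt -> new.txt
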
@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    """complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used."""

    def complete(path, acceptable):
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        spec = spec[len(rootdir) :]
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.items():
            if f.startswith(spec) and st.state in acceptable:
                if fixpaths:
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')


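# Illustrative sketch (not part of upstream Mercurial): completing one path
# segment versus a full path; the directory and file names are hypothetical.
#
#   $ hg debugpathcomplete src/he
#   src/helpers
#   $ hg debugpathcomplete --full src/he
#   src/helpers/util.py
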
@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    ctx1 = scmutil.revsingle(repo, rev1)
    ctx2 = scmutil.revsingle(repo, rev2)
    m = scmutil.match(ctx1, pats, opts)
    for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
        ui.write(b'%s -> %s\n' % (src, dst))


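# Illustrative sketch (not part of upstream Mercurial): listing copies between
# two revisions, optionally narrowed to a file pattern; the revision names and
# pattern are hypothetical, and output follows the "source -> destination"
# format above.
#
#   $ hg debugpathcopies 1.0 tip "glob:**.py"
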
@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging. Requires --debug to display
    # though.
    overrides = {
        (b'devel', b'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        try:
            local = peer.local() is not None
            canpush = peer.canpush()

            ui.write(_(b'url: %s\n') % peer.url())
            ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
            ui.write(
                _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
            )
        finally:
            peer.close()


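# Illustrative sketch (not part of upstream Mercurial): the three lines written
# above for a remote HTTP peer; the URL is hypothetical.
#
#   $ hg debugpeer https://hg.example.com/repo
#   url: https://hg.example.com/repo
#   local: no
#   pushable: yes
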
@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reasons only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out the examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actually updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases the amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If a merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such a case, the information
    above is useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts[b'tool']:
        overrides[(b'ui', b'forcemerge')] = opts[b'tool']
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        hgmerge = encoding.environ.get(b"HGMERGE")
        if hgmerge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config(b"ui", b"merge")
        if uimerge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts[b'changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            with ui.silent(
                error=True
            ) if not ui.debugflag else util.nullcontextmanager():
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            ui.write(b'%s = %s\n' % (path, tool))


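# Illustrative sketch (not part of upstream Mercurial): the "FILE = MERGETOOL"
# output described in the docstring above; the file name and the configured
# tool are hypothetical.
#
#   $ hg debugpickmergetool --rev . "glob:**.c"
#   src/main.c = kdiff3
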
2910 @command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
2912 @command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
2911 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
2913 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
2912 """access the pushkey key/value protocol
2914 """access the pushkey key/value protocol
2913
2915
2914 With two args, list the keys in the given namespace.
2916 With two args, list the keys in the given namespace.
2915
2917
2916 With five args, set a key to new if it currently is set to old.
2918 With five args, set a key to new if it currently is set to old.
2917 Reports success or failure.
2919 Reports success or failure.
2918 """
2920 """
2919
2921
2920 target = hg.peer(ui, {}, repopath)
2922 target = hg.peer(ui, {}, repopath)
2921 try:
2923 try:
2922 if keyinfo:
2924 if keyinfo:
2923 key, old, new = keyinfo
2925 key, old, new = keyinfo
2924 with target.commandexecutor() as e:
2926 with target.commandexecutor() as e:
2925 r = e.callcommand(
2927 r = e.callcommand(
2926 b'pushkey',
2928 b'pushkey',
2927 {
2929 {
2928 b'namespace': namespace,
2930 b'namespace': namespace,
2929 b'key': key,
2931 b'key': key,
2930 b'old': old,
2932 b'old': old,
2931 b'new': new,
2933 b'new': new,
2932 },
2934 },
2933 ).result()
2935 ).result()
2934
2936
2935 ui.status(pycompat.bytestr(r) + b'\n')
2937 ui.status(pycompat.bytestr(r) + b'\n')
2936 return not r
2938 return not r
2937 else:
2939 else:
2938 for k, v in sorted(target.listkeys(namespace).items()):
2940 for k, v in sorted(target.listkeys(namespace).items()):
2939 ui.write(
2941 ui.write(
2940 b"%s\t%s\n"
2942 b"%s\t%s\n"
2941 % (stringutil.escapestr(k), stringutil.escapestr(v))
2943 % (stringutil.escapestr(k), stringutil.escapestr(v))
2942 )
2944 )
2943 finally:
2945 finally:
2944 target.close()
2946 target.close()
2945
2947
2946
2948
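# Illustrative sketch (not the wire protocol implementation): the
# compare-and-set behaviour that the five-argument form of debugpushkey
# exercises -- a key is only moved to its new value if it currently holds
# the expected old value, and the command reports success or failure.
def _compare_and_set(store, key, old, new):
    """Model of pushkey semantics over a plain dict of bytes values."""
    if store.get(key, b'') != old:
        return False  # rejected: reported as a failure
    store[key] = new
    return True  # accepted: reported as a success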
2947 @command(b'debugpvec', [], _(b'A B'))
2949 @command(b'debugpvec', [], _(b'A B'))
2948 def debugpvec(ui, repo, a, b=None):
2950 def debugpvec(ui, repo, a, b=None):
2949 ca = scmutil.revsingle(repo, a)
2951 ca = scmutil.revsingle(repo, a)
2950 cb = scmutil.revsingle(repo, b)
2952 cb = scmutil.revsingle(repo, b)
2951 pa = pvec.ctxpvec(ca)
2953 pa = pvec.ctxpvec(ca)
2952 pb = pvec.ctxpvec(cb)
2954 pb = pvec.ctxpvec(cb)
2953 if pa == pb:
2955 if pa == pb:
2954 rel = b"="
2956 rel = b"="
2955 elif pa > pb:
2957 elif pa > pb:
2956 rel = b">"
2958 rel = b">"
2957 elif pa < pb:
2959 elif pa < pb:
2958 rel = b"<"
2960 rel = b"<"
2959 elif pa | pb:
2961 elif pa | pb:
2960 rel = b"|"
2962 rel = b"|"
2961 ui.write(_(b"a: %s\n") % pa)
2963 ui.write(_(b"a: %s\n") % pa)
2962 ui.write(_(b"b: %s\n") % pb)
2964 ui.write(_(b"b: %s\n") % pb)
2963 ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
2965 ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
2964 ui.write(
2966 ui.write(
2965 _(b"delta: %d hdist: %d distance: %d relation: %s\n")
2967 _(b"delta: %d hdist: %d distance: %d relation: %s\n")
2966 % (
2968 % (
2967 abs(pa._depth - pb._depth),
2969 abs(pa._depth - pb._depth),
2968 pvec._hamming(pa._vec, pb._vec),
2970 pvec._hamming(pa._vec, pb._vec),
2969 pa.distance(pb),
2971 pa.distance(pb),
2970 rel,
2972 rel,
2971 )
2973 )
2972 )
2974 )
2973
2975
2974
2976
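# Illustrative sketch (hypothetical helper): the relation symbol dispatch used
# by debugpvec above. The real code tests an explicit ``pa | pb`` predicate for
# the unrelated case instead of simply falling through, so this is only a
# rough model of the output.
def _pvec_relation(pa, pb):
    if pa == pb:
        return b"="
    if pa > pb:
        return b">"
    if pa < pb:
        return b"<"
    return b"|"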
2975 @command(
2977 @command(
2976 b'debugrebuilddirstate|debugrebuildstate',
2978 b'debugrebuilddirstate|debugrebuildstate',
2977 [
2979 [
2978 (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
2980 (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
2979 (
2981 (
2980 b'',
2982 b'',
2981 b'minimal',
2983 b'minimal',
2982 None,
2984 None,
2983 _(
2985 _(
2984 b'only rebuild files that are inconsistent with '
2986 b'only rebuild files that are inconsistent with '
2985 b'the working copy parent'
2987 b'the working copy parent'
2986 ),
2988 ),
2987 ),
2989 ),
2988 ],
2990 ],
2989 _(b'[-r REV]'),
2991 _(b'[-r REV]'),
2990 )
2992 )
2991 def debugrebuilddirstate(ui, repo, rev, **opts):
2993 def debugrebuilddirstate(ui, repo, rev, **opts):
2992 """rebuild the dirstate as it would look like for the given revision
2994 """rebuild the dirstate as it would look like for the given revision
2993
2995
2994 If no revision is specified the first current parent will be used.
2996 If no revision is specified the first current parent will be used.
2995
2997
2996 The dirstate will be set to the files of the given revision.
2998 The dirstate will be set to the files of the given revision.
2997 The actual working directory content or existing dirstate
2999 The actual working directory content or existing dirstate
2998 information such as adds or removes is not considered.
3000 information such as adds or removes is not considered.
2999
3001
3000 ``minimal`` will only rebuild the dirstate status for files that claim to be
3002 ``minimal`` will only rebuild the dirstate status for files that claim to be
3001 tracked but are not in the parent manifest, or that exist in the parent
3003 tracked but are not in the parent manifest, or that exist in the parent
3002 manifest but are not in the dirstate. It will not change adds, removes, or
3004 manifest but are not in the dirstate. It will not change adds, removes, or
3003 modified files that are in the working copy parent.
3005 modified files that are in the working copy parent.
3004
3006
3005 One use of this command is to make the next :hg:`status` invocation
3007 One use of this command is to make the next :hg:`status` invocation
3006 check the actual file content.
3008 check the actual file content.
3007 """
3009 """
3008 ctx = scmutil.revsingle(repo, rev)
3010 ctx = scmutil.revsingle(repo, rev)
3009 with repo.wlock():
3011 with repo.wlock():
3010 dirstate = repo.dirstate
3012 dirstate = repo.dirstate
3011 changedfiles = None
3013 changedfiles = None
3012 # See command doc for what minimal does.
3014 # See command doc for what minimal does.
3013 if opts.get('minimal'):
3015 if opts.get('minimal'):
3014 manifestfiles = set(ctx.manifest().keys())
3016 manifestfiles = set(ctx.manifest().keys())
3015 dirstatefiles = set(dirstate)
3017 dirstatefiles = set(dirstate)
3016 manifestonly = manifestfiles - dirstatefiles
3018 manifestonly = manifestfiles - dirstatefiles
3017 dsonly = dirstatefiles - manifestfiles
3019 dsonly = dirstatefiles - manifestfiles
3018 dsnotadded = {f for f in dsonly if not dirstate.get_entry(f).added}
3020 dsnotadded = {f for f in dsonly if not dirstate.get_entry(f).added}
3019 changedfiles = manifestonly | dsnotadded
3021 changedfiles = manifestonly | dsnotadded
3020
3022
3021 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
3023 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
3022
3024
3023
3025
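# Illustrative sketch: the set arithmetic behind the --minimal option of
# debugrebuilddirstate above, using plain Python sets instead of a real
# manifest and dirstate. Only files present in the manifest but missing from
# the dirstate, plus dirstate-only files that are not freshly added, get their
# status rebuilt; everything else is left alone. Argument names are
# hypothetical.
def _minimal_changedfiles(manifestfiles, dirstatefiles, addedfiles):
    manifestonly = manifestfiles - dirstatefiles
    dsnotadded = (dirstatefiles - manifestfiles) - addedfiles
    return manifestonly | dsnotadded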
3024 @command(
3026 @command(
3025 b'debugrebuildfncache',
3027 b'debugrebuildfncache',
3026 [
3028 [
3027 (
3029 (
3028 b'',
3030 b'',
3029 b'only-data',
3031 b'only-data',
3030 False,
3032 False,
3031 _(b'only look for wrong .d files (much faster)'),
3033 _(b'only look for wrong .d files (much faster)'),
3032 )
3034 )
3033 ],
3035 ],
3034 b'',
3036 b'',
3035 )
3037 )
3036 def debugrebuildfncache(ui, repo, **opts):
3038 def debugrebuildfncache(ui, repo, **opts):
3037 """rebuild the fncache file"""
3039 """rebuild the fncache file"""
3038 opts = pycompat.byteskwargs(opts)
3040 opts = pycompat.byteskwargs(opts)
3039 repair.rebuildfncache(ui, repo, opts.get(b"only_data"))
3041 repair.rebuildfncache(ui, repo, opts.get(b"only_data"))
3040
3042
3041
3043
3042 @command(
3044 @command(
3043 b'debugrename',
3045 b'debugrename',
3044 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
3046 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
3045 _(b'[-r REV] [FILE]...'),
3047 _(b'[-r REV] [FILE]...'),
3046 )
3048 )
3047 def debugrename(ui, repo, *pats, **opts):
3049 def debugrename(ui, repo, *pats, **opts):
3048 """dump rename information"""
3050 """dump rename information"""
3049
3051
3050 opts = pycompat.byteskwargs(opts)
3052 opts = pycompat.byteskwargs(opts)
3051 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
3053 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
3052 m = scmutil.match(ctx, pats, opts)
3054 m = scmutil.match(ctx, pats, opts)
3053 for abs in ctx.walk(m):
3055 for abs in ctx.walk(m):
3054 fctx = ctx[abs]
3056 fctx = ctx[abs]
3055 o = fctx.filelog().renamed(fctx.filenode())
3057 o = fctx.filelog().renamed(fctx.filenode())
3056 rel = repo.pathto(abs)
3058 rel = repo.pathto(abs)
3057 if o:
3059 if o:
3058 ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
3060 ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
3059 else:
3061 else:
3060 ui.write(_(b"%s not renamed\n") % rel)
3062 ui.write(_(b"%s not renamed\n") % rel)
3061
3063
3062
3064
3063 @command(b'debugrequires|debugrequirements', [], b'')
3065 @command(b'debugrequires|debugrequirements', [], b'')
3064 def debugrequirements(ui, repo):
3066 def debugrequirements(ui, repo):
3065 """print the current repo requirements"""
3067 """print the current repo requirements"""
3066 for r in sorted(repo.requirements):
3068 for r in sorted(repo.requirements):
3067 ui.write(b"%s\n" % r)
3069 ui.write(b"%s\n" % r)
3068
3070
3069
3071
3070 @command(
3072 @command(
3071 b'debugrevlog',
3073 b'debugrevlog',
3072 cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
3074 cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
3073 _(b'-c|-m|FILE'),
3075 _(b'-c|-m|FILE'),
3074 optionalrepo=True,
3076 optionalrepo=True,
3075 )
3077 )
3076 def debugrevlog(ui, repo, file_=None, **opts):
3078 def debugrevlog(ui, repo, file_=None, **opts):
3077 """show data and statistics about a revlog"""
3079 """show data and statistics about a revlog"""
3078 opts = pycompat.byteskwargs(opts)
3080 opts = pycompat.byteskwargs(opts)
3079 r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
3081 r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
3080
3082
3081 if opts.get(b"dump"):
3083 if opts.get(b"dump"):
3082 numrevs = len(r)
3084 numrevs = len(r)
3083 ui.write(
3085 ui.write(
3084 (
3086 (
3085 b"# rev p1rev p2rev start end deltastart base p1 p2"
3087 b"# rev p1rev p2rev start end deltastart base p1 p2"
3086 b" rawsize totalsize compression heads chainlen\n"
3088 b" rawsize totalsize compression heads chainlen\n"
3087 )
3089 )
3088 )
3090 )
3089 ts = 0
3091 ts = 0
3090 heads = set()
3092 heads = set()
3091
3093
3092 for rev in pycompat.xrange(numrevs):
3094 for rev in pycompat.xrange(numrevs):
3093 dbase = r.deltaparent(rev)
3095 dbase = r.deltaparent(rev)
3094 if dbase == -1:
3096 if dbase == -1:
3095 dbase = rev
3097 dbase = rev
3096 cbase = r.chainbase(rev)
3098 cbase = r.chainbase(rev)
3097 clen = r.chainlen(rev)
3099 clen = r.chainlen(rev)
3098 p1, p2 = r.parentrevs(rev)
3100 p1, p2 = r.parentrevs(rev)
3099 rs = r.rawsize(rev)
3101 rs = r.rawsize(rev)
3100 ts = ts + rs
3102 ts = ts + rs
3101 heads -= set(r.parentrevs(rev))
3103 heads -= set(r.parentrevs(rev))
3102 heads.add(rev)
3104 heads.add(rev)
3103 try:
3105 try:
3104 compression = ts / r.end(rev)
3106 compression = ts / r.end(rev)
3105 except ZeroDivisionError:
3107 except ZeroDivisionError:
3106 compression = 0
3108 compression = 0
3107 ui.write(
3109 ui.write(
3108 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
3110 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
3109 b"%11d %5d %8d\n"
3111 b"%11d %5d %8d\n"
3110 % (
3112 % (
3111 rev,
3113 rev,
3112 p1,
3114 p1,
3113 p2,
3115 p2,
3114 r.start(rev),
3116 r.start(rev),
3115 r.end(rev),
3117 r.end(rev),
3116 r.start(dbase),
3118 r.start(dbase),
3117 r.start(cbase),
3119 r.start(cbase),
3118 r.start(p1),
3120 r.start(p1),
3119 r.start(p2),
3121 r.start(p2),
3120 rs,
3122 rs,
3121 ts,
3123 ts,
3122 compression,
3124 compression,
3123 len(heads),
3125 len(heads),
3124 clen,
3126 clen,
3125 )
3127 )
3126 )
3128 )
3127 return 0
3129 return 0
3128
3130
3129 format = r._format_version
3131 format = r._format_version
3130 v = r._format_flags
3132 v = r._format_flags
3131 flags = []
3133 flags = []
3132 gdelta = False
3134 gdelta = False
3133 if v & revlog.FLAG_INLINE_DATA:
3135 if v & revlog.FLAG_INLINE_DATA:
3134 flags.append(b'inline')
3136 flags.append(b'inline')
3135 if v & revlog.FLAG_GENERALDELTA:
3137 if v & revlog.FLAG_GENERALDELTA:
3136 gdelta = True
3138 gdelta = True
3137 flags.append(b'generaldelta')
3139 flags.append(b'generaldelta')
3138 if not flags:
3140 if not flags:
3139 flags = [b'(none)']
3141 flags = [b'(none)']
3140
3142
3141 ### tracks merge vs single parent
3143 ### tracks merge vs single parent
3142 nummerges = 0
3144 nummerges = 0
3143
3145
    ### tracks the ways the "delta" are built
3145 # nodelta
3147 # nodelta
3146 numempty = 0
3148 numempty = 0
3147 numemptytext = 0
3149 numemptytext = 0
3148 numemptydelta = 0
3150 numemptydelta = 0
3149 # full file content
3151 # full file content
3150 numfull = 0
3152 numfull = 0
3151 # intermediate snapshot against a prior snapshot
3153 # intermediate snapshot against a prior snapshot
3152 numsemi = 0
3154 numsemi = 0
3153 # snapshot count per depth
3155 # snapshot count per depth
3154 numsnapdepth = collections.defaultdict(lambda: 0)
3156 numsnapdepth = collections.defaultdict(lambda: 0)
3155 # delta against previous revision
3157 # delta against previous revision
3156 numprev = 0
3158 numprev = 0
3157 # delta against first or second parent (not prev)
3159 # delta against first or second parent (not prev)
3158 nump1 = 0
3160 nump1 = 0
3159 nump2 = 0
3161 nump2 = 0
3160 # delta against neither prev nor parents
3162 # delta against neither prev nor parents
3161 numother = 0
3163 numother = 0
3162 # delta against prev that are also first or second parent
3164 # delta against prev that are also first or second parent
3163 # (details of `numprev`)
3165 # (details of `numprev`)
3164 nump1prev = 0
3166 nump1prev = 0
3165 nump2prev = 0
3167 nump2prev = 0
3166
3168
    # data about the delta chain of each rev
3168 chainlengths = []
3170 chainlengths = []
3169 chainbases = []
3171 chainbases = []
3170 chainspans = []
3172 chainspans = []
3171
3173
3172 # data about each revision
3174 # data about each revision
3173 datasize = [None, 0, 0]
3175 datasize = [None, 0, 0]
3174 fullsize = [None, 0, 0]
3176 fullsize = [None, 0, 0]
3175 semisize = [None, 0, 0]
3177 semisize = [None, 0, 0]
3176 # snapshot count per depth
3178 # snapshot count per depth
3177 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
3179 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
3178 deltasize = [None, 0, 0]
3180 deltasize = [None, 0, 0]
3179 chunktypecounts = {}
3181 chunktypecounts = {}
3180 chunktypesizes = {}
3182 chunktypesizes = {}
3181
3183
3182 def addsize(size, l):
3184 def addsize(size, l):
3183 if l[0] is None or size < l[0]:
3185 if l[0] is None or size < l[0]:
3184 l[0] = size
3186 l[0] = size
3185 if size > l[1]:
3187 if size > l[1]:
3186 l[1] = size
3188 l[1] = size
3187 l[2] += size
3189 l[2] += size
3188
3190
3189 numrevs = len(r)
3191 numrevs = len(r)
3190 for rev in pycompat.xrange(numrevs):
3192 for rev in pycompat.xrange(numrevs):
3191 p1, p2 = r.parentrevs(rev)
3193 p1, p2 = r.parentrevs(rev)
3192 delta = r.deltaparent(rev)
3194 delta = r.deltaparent(rev)
3193 if format > 0:
3195 if format > 0:
3194 addsize(r.rawsize(rev), datasize)
3196 addsize(r.rawsize(rev), datasize)
3195 if p2 != nullrev:
3197 if p2 != nullrev:
3196 nummerges += 1
3198 nummerges += 1
3197 size = r.length(rev)
3199 size = r.length(rev)
3198 if delta == nullrev:
3200 if delta == nullrev:
3199 chainlengths.append(0)
3201 chainlengths.append(0)
3200 chainbases.append(r.start(rev))
3202 chainbases.append(r.start(rev))
3201 chainspans.append(size)
3203 chainspans.append(size)
3202 if size == 0:
3204 if size == 0:
3203 numempty += 1
3205 numempty += 1
3204 numemptytext += 1
3206 numemptytext += 1
3205 else:
3207 else:
3206 numfull += 1
3208 numfull += 1
3207 numsnapdepth[0] += 1
3209 numsnapdepth[0] += 1
3208 addsize(size, fullsize)
3210 addsize(size, fullsize)
3209 addsize(size, snapsizedepth[0])
3211 addsize(size, snapsizedepth[0])
3210 else:
3212 else:
3211 chainlengths.append(chainlengths[delta] + 1)
3213 chainlengths.append(chainlengths[delta] + 1)
3212 baseaddr = chainbases[delta]
3214 baseaddr = chainbases[delta]
3213 revaddr = r.start(rev)
3215 revaddr = r.start(rev)
3214 chainbases.append(baseaddr)
3216 chainbases.append(baseaddr)
3215 chainspans.append((revaddr - baseaddr) + size)
3217 chainspans.append((revaddr - baseaddr) + size)
3216 if size == 0:
3218 if size == 0:
3217 numempty += 1
3219 numempty += 1
3218 numemptydelta += 1
3220 numemptydelta += 1
3219 elif r.issnapshot(rev):
3221 elif r.issnapshot(rev):
3220 addsize(size, semisize)
3222 addsize(size, semisize)
3221 numsemi += 1
3223 numsemi += 1
3222 depth = r.snapshotdepth(rev)
3224 depth = r.snapshotdepth(rev)
3223 numsnapdepth[depth] += 1
3225 numsnapdepth[depth] += 1
3224 addsize(size, snapsizedepth[depth])
3226 addsize(size, snapsizedepth[depth])
3225 else:
3227 else:
3226 addsize(size, deltasize)
3228 addsize(size, deltasize)
3227 if delta == rev - 1:
3229 if delta == rev - 1:
3228 numprev += 1
3230 numprev += 1
3229 if delta == p1:
3231 if delta == p1:
3230 nump1prev += 1
3232 nump1prev += 1
3231 elif delta == p2:
3233 elif delta == p2:
3232 nump2prev += 1
3234 nump2prev += 1
3233 elif delta == p1:
3235 elif delta == p1:
3234 nump1 += 1
3236 nump1 += 1
3235 elif delta == p2:
3237 elif delta == p2:
3236 nump2 += 1
3238 nump2 += 1
3237 elif delta != nullrev:
3239 elif delta != nullrev:
3238 numother += 1
3240 numother += 1
3239
3241
3240 # Obtain data on the raw chunks in the revlog.
3242 # Obtain data on the raw chunks in the revlog.
3241 if util.safehasattr(r, b'_getsegmentforrevs'):
3243 if util.safehasattr(r, b'_getsegmentforrevs'):
3242 segment = r._getsegmentforrevs(rev, rev)[1]
3244 segment = r._getsegmentforrevs(rev, rev)[1]
3243 else:
3245 else:
3244 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
3246 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
3245 if segment:
3247 if segment:
3246 chunktype = bytes(segment[0:1])
3248 chunktype = bytes(segment[0:1])
3247 else:
3249 else:
3248 chunktype = b'empty'
3250 chunktype = b'empty'
3249
3251
3250 if chunktype not in chunktypecounts:
3252 if chunktype not in chunktypecounts:
3251 chunktypecounts[chunktype] = 0
3253 chunktypecounts[chunktype] = 0
3252 chunktypesizes[chunktype] = 0
3254 chunktypesizes[chunktype] = 0
3253
3255
3254 chunktypecounts[chunktype] += 1
3256 chunktypecounts[chunktype] += 1
3255 chunktypesizes[chunktype] += size
3257 chunktypesizes[chunktype] += size
3256
3258
3257 # Adjust size min value for empty cases
3259 # Adjust size min value for empty cases
3258 for size in (datasize, fullsize, semisize, deltasize):
3260 for size in (datasize, fullsize, semisize, deltasize):
3259 if size[0] is None:
3261 if size[0] is None:
3260 size[0] = 0
3262 size[0] = 0
3261
3263
3262 numdeltas = numrevs - numfull - numempty - numsemi
3264 numdeltas = numrevs - numfull - numempty - numsemi
3263 numoprev = numprev - nump1prev - nump2prev
3265 numoprev = numprev - nump1prev - nump2prev
3264 totalrawsize = datasize[2]
3266 totalrawsize = datasize[2]
3265 datasize[2] /= numrevs
3267 datasize[2] /= numrevs
3266 fulltotal = fullsize[2]
3268 fulltotal = fullsize[2]
3267 if numfull == 0:
3269 if numfull == 0:
3268 fullsize[2] = 0
3270 fullsize[2] = 0
3269 else:
3271 else:
3270 fullsize[2] /= numfull
3272 fullsize[2] /= numfull
3271 semitotal = semisize[2]
3273 semitotal = semisize[2]
3272 snaptotal = {}
3274 snaptotal = {}
3273 if numsemi > 0:
3275 if numsemi > 0:
3274 semisize[2] /= numsemi
3276 semisize[2] /= numsemi
3275 for depth in snapsizedepth:
3277 for depth in snapsizedepth:
3276 snaptotal[depth] = snapsizedepth[depth][2]
3278 snaptotal[depth] = snapsizedepth[depth][2]
3277 snapsizedepth[depth][2] /= numsnapdepth[depth]
3279 snapsizedepth[depth][2] /= numsnapdepth[depth]
3278
3280
3279 deltatotal = deltasize[2]
3281 deltatotal = deltasize[2]
3280 if numdeltas > 0:
3282 if numdeltas > 0:
3281 deltasize[2] /= numdeltas
3283 deltasize[2] /= numdeltas
3282 totalsize = fulltotal + semitotal + deltatotal
3284 totalsize = fulltotal + semitotal + deltatotal
3283 avgchainlen = sum(chainlengths) / numrevs
3285 avgchainlen = sum(chainlengths) / numrevs
3284 maxchainlen = max(chainlengths)
3286 maxchainlen = max(chainlengths)
3285 maxchainspan = max(chainspans)
3287 maxchainspan = max(chainspans)
3286 compratio = 1
3288 compratio = 1
3287 if totalsize:
3289 if totalsize:
3288 compratio = totalrawsize / totalsize
3290 compratio = totalrawsize / totalsize
3289
3291
3290 basedfmtstr = b'%%%dd\n'
3292 basedfmtstr = b'%%%dd\n'
3291 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
3293 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
3292
3294
3293 def dfmtstr(max):
3295 def dfmtstr(max):
3294 return basedfmtstr % len(str(max))
3296 return basedfmtstr % len(str(max))
3295
3297
3296 def pcfmtstr(max, padding=0):
3298 def pcfmtstr(max, padding=0):
3297 return basepcfmtstr % (len(str(max)), b' ' * padding)
3299 return basepcfmtstr % (len(str(max)), b' ' * padding)
3298
3300
3299 def pcfmt(value, total):
3301 def pcfmt(value, total):
3300 if total:
3302 if total:
3301 return (value, 100 * float(value) / total)
3303 return (value, 100 * float(value) / total)
3302 else:
3304 else:
3303 return value, 100.0
3305 return value, 100.0
3304
3306
3305 ui.writenoi18n(b'format : %d\n' % format)
3307 ui.writenoi18n(b'format : %d\n' % format)
3306 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
3308 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
3307
3309
3308 ui.write(b'\n')
3310 ui.write(b'\n')
3309 fmt = pcfmtstr(totalsize)
3311 fmt = pcfmtstr(totalsize)
3310 fmt2 = dfmtstr(totalsize)
3312 fmt2 = dfmtstr(totalsize)
3311 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3313 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3312 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
3314 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
3313 ui.writenoi18n(
3315 ui.writenoi18n(
3314 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
3316 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
3315 )
3317 )
3316 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3318 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3317 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
3319 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
3318 ui.writenoi18n(
3320 ui.writenoi18n(
3319 b' text : '
3321 b' text : '
3320 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
3322 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
3321 )
3323 )
3322 ui.writenoi18n(
3324 ui.writenoi18n(
3323 b' delta : '
3325 b' delta : '
3324 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
3326 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
3325 )
3327 )
3326 ui.writenoi18n(
3328 ui.writenoi18n(
3327 b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
3329 b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
3328 )
3330 )
3329 for depth in sorted(numsnapdepth):
3331 for depth in sorted(numsnapdepth):
3330 ui.write(
3332 ui.write(
3331 (b' lvl-%-3d : ' % depth)
3333 (b' lvl-%-3d : ' % depth)
3332 + fmt % pcfmt(numsnapdepth[depth], numrevs)
3334 + fmt % pcfmt(numsnapdepth[depth], numrevs)
3333 )
3335 )
3334 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
3336 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
3335 ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
3337 ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
3336 ui.writenoi18n(
3338 ui.writenoi18n(
3337 b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
3339 b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
3338 )
3340 )
3339 for depth in sorted(numsnapdepth):
3341 for depth in sorted(numsnapdepth):
3340 ui.write(
3342 ui.write(
3341 (b' lvl-%-3d : ' % depth)
3343 (b' lvl-%-3d : ' % depth)
3342 + fmt % pcfmt(snaptotal[depth], totalsize)
3344 + fmt % pcfmt(snaptotal[depth], totalsize)
3343 )
3345 )
3344 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
3346 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
3345
3347
3346 def fmtchunktype(chunktype):
3348 def fmtchunktype(chunktype):
3347 if chunktype == b'empty':
3349 if chunktype == b'empty':
3348 return b' %s : ' % chunktype
3350 return b' %s : ' % chunktype
3349 elif chunktype in pycompat.bytestr(string.ascii_letters):
3351 elif chunktype in pycompat.bytestr(string.ascii_letters):
3350 return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
3352 return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
3351 else:
3353 else:
3352 return b' 0x%s : ' % hex(chunktype)
3354 return b' 0x%s : ' % hex(chunktype)
3353
3355
3354 ui.write(b'\n')
3356 ui.write(b'\n')
3355 ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
3357 ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
3356 for chunktype in sorted(chunktypecounts):
3358 for chunktype in sorted(chunktypecounts):
3357 ui.write(fmtchunktype(chunktype))
3359 ui.write(fmtchunktype(chunktype))
3358 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
3360 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
3359 ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
3361 ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
3360 for chunktype in sorted(chunktypecounts):
3362 for chunktype in sorted(chunktypecounts):
3361 ui.write(fmtchunktype(chunktype))
3363 ui.write(fmtchunktype(chunktype))
3362 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
3364 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
3363
3365
3364 ui.write(b'\n')
3366 ui.write(b'\n')
3365 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
3367 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
3366 ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
3368 ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
3367 ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
3369 ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
3368 ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
3370 ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
3369 ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
3371 ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
3370
3372
3371 if format > 0:
3373 if format > 0:
3372 ui.write(b'\n')
3374 ui.write(b'\n')
3373 ui.writenoi18n(
3375 ui.writenoi18n(
3374 b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
3376 b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
3375 % tuple(datasize)
3377 % tuple(datasize)
3376 )
3378 )
3377 ui.writenoi18n(
3379 ui.writenoi18n(
3378 b'full revision size (min/max/avg) : %d / %d / %d\n'
3380 b'full revision size (min/max/avg) : %d / %d / %d\n'
3379 % tuple(fullsize)
3381 % tuple(fullsize)
3380 )
3382 )
3381 ui.writenoi18n(
3383 ui.writenoi18n(
3382 b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
3384 b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
3383 % tuple(semisize)
3385 % tuple(semisize)
3384 )
3386 )
3385 for depth in sorted(snapsizedepth):
3387 for depth in sorted(snapsizedepth):
3386 if depth == 0:
3388 if depth == 0:
3387 continue
3389 continue
3388 ui.writenoi18n(
3390 ui.writenoi18n(
3389 b' level-%-3d (min/max/avg) : %d / %d / %d\n'
3391 b' level-%-3d (min/max/avg) : %d / %d / %d\n'
3390 % ((depth,) + tuple(snapsizedepth[depth]))
3392 % ((depth,) + tuple(snapsizedepth[depth]))
3391 )
3393 )
3392 ui.writenoi18n(
3394 ui.writenoi18n(
3393 b'delta size (min/max/avg) : %d / %d / %d\n'
3395 b'delta size (min/max/avg) : %d / %d / %d\n'
3394 % tuple(deltasize)
3396 % tuple(deltasize)
3395 )
3397 )
3396
3398
3397 if numdeltas > 0:
3399 if numdeltas > 0:
3398 ui.write(b'\n')
3400 ui.write(b'\n')
3399 fmt = pcfmtstr(numdeltas)
3401 fmt = pcfmtstr(numdeltas)
3400 fmt2 = pcfmtstr(numdeltas, 4)
3402 fmt2 = pcfmtstr(numdeltas, 4)
3401 ui.writenoi18n(
3403 ui.writenoi18n(
3402 b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
3404 b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
3403 )
3405 )
3404 if numprev > 0:
3406 if numprev > 0:
3405 ui.writenoi18n(
3407 ui.writenoi18n(
3406 b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
3408 b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
3407 )
3409 )
3408 ui.writenoi18n(
3410 ui.writenoi18n(
3409 b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
3411 b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
3410 )
3412 )
3411 ui.writenoi18n(
3413 ui.writenoi18n(
3412 b' other : ' + fmt2 % pcfmt(numoprev, numprev)
3414 b' other : ' + fmt2 % pcfmt(numoprev, numprev)
3413 )
3415 )
3414 if gdelta:
3416 if gdelta:
3415 ui.writenoi18n(
3417 ui.writenoi18n(
3416 b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
3418 b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
3417 )
3419 )
3418 ui.writenoi18n(
3420 ui.writenoi18n(
3419 b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
3421 b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
3420 )
3422 )
3421 ui.writenoi18n(
3423 ui.writenoi18n(
3422 b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
3424 b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
3423 )
3425 )
3424
3426
3425
3427
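# Illustrative sketch (hypothetical helper, not Mercurial API): how the
# statistics loop in debugrevlog above classifies each revision by its delta
# parent. Empty deltas and the exact snapshot-depth bookkeeping are omitted;
# only the broad buckets reported as "snapshot", "deltas against prev/p1/p2"
# and "other" are shown.
def _classify_delta(rev, deltaparent, p1, p2, is_snapshot, nullrev=-1):
    if deltaparent == nullrev:
        return 'full snapshot'          # numfull / snapshot level 0
    if is_snapshot:
        return 'intermediate snapshot'  # numsemi / deeper snapshot levels
    if deltaparent == rev - 1:
        return 'prev'                   # numprev (may also be p1 or p2)
    if deltaparent == p1:
        return 'p1'
    if deltaparent == p2:
        return 'p2'
    return 'other'                      # numother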
3426 @command(
3428 @command(
3427 b'debugrevlogindex',
3429 b'debugrevlogindex',
3428 cmdutil.debugrevlogopts
3430 cmdutil.debugrevlogopts
3429 + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
3431 + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
3430 _(b'[-f FORMAT] -c|-m|FILE'),
3432 _(b'[-f FORMAT] -c|-m|FILE'),
3431 optionalrepo=True,
3433 optionalrepo=True,
3432 )
3434 )
3433 def debugrevlogindex(ui, repo, file_=None, **opts):
3435 def debugrevlogindex(ui, repo, file_=None, **opts):
3434 """dump the contents of a revlog index"""
3436 """dump the contents of a revlog index"""
3435 opts = pycompat.byteskwargs(opts)
3437 opts = pycompat.byteskwargs(opts)
3436 r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
3438 r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
3437 format = opts.get(b'format', 0)
3439 format = opts.get(b'format', 0)
3438 if format not in (0, 1):
3440 if format not in (0, 1):
3439 raise error.Abort(_(b"unknown format %d") % format)
3441 raise error.Abort(_(b"unknown format %d") % format)
3440
3442
3441 if ui.debugflag:
3443 if ui.debugflag:
3442 shortfn = hex
3444 shortfn = hex
3443 else:
3445 else:
3444 shortfn = short
3446 shortfn = short
3445
3447
3446 # There might not be anything in r, so have a sane default
3448 # There might not be anything in r, so have a sane default
3447 idlen = 12
3449 idlen = 12
3448 for i in r:
3450 for i in r:
3449 idlen = len(shortfn(r.node(i)))
3451 idlen = len(shortfn(r.node(i)))
3450 break
3452 break
3451
3453
3452 if format == 0:
3454 if format == 0:
3453 if ui.verbose:
3455 if ui.verbose:
3454 ui.writenoi18n(
3456 ui.writenoi18n(
3455 b" rev offset length linkrev %s %s p2\n"
3457 b" rev offset length linkrev %s %s p2\n"
3456 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3458 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3457 )
3459 )
3458 else:
3460 else:
3459 ui.writenoi18n(
3461 ui.writenoi18n(
3460 b" rev linkrev %s %s p2\n"
3462 b" rev linkrev %s %s p2\n"
3461 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3463 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3462 )
3464 )
3463 elif format == 1:
3465 elif format == 1:
3464 if ui.verbose:
3466 if ui.verbose:
3465 ui.writenoi18n(
3467 ui.writenoi18n(
3466 (
3468 (
3467 b" rev flag offset length size link p1"
3469 b" rev flag offset length size link p1"
3468 b" p2 %s\n"
3470 b" p2 %s\n"
3469 )
3471 )
3470 % b"nodeid".rjust(idlen)
3472 % b"nodeid".rjust(idlen)
3471 )
3473 )
3472 else:
3474 else:
3473 ui.writenoi18n(
3475 ui.writenoi18n(
3474 b" rev flag size link p1 p2 %s\n"
3476 b" rev flag size link p1 p2 %s\n"
3475 % b"nodeid".rjust(idlen)
3477 % b"nodeid".rjust(idlen)
3476 )
3478 )
3477
3479
3478 for i in r:
3480 for i in r:
3479 node = r.node(i)
3481 node = r.node(i)
3480 if format == 0:
3482 if format == 0:
3481 try:
3483 try:
3482 pp = r.parents(node)
3484 pp = r.parents(node)
3483 except Exception:
3485 except Exception:
3484 pp = [repo.nullid, repo.nullid]
3486 pp = [repo.nullid, repo.nullid]
3485 if ui.verbose:
3487 if ui.verbose:
3486 ui.write(
3488 ui.write(
3487 b"% 6d % 9d % 7d % 7d %s %s %s\n"
3489 b"% 6d % 9d % 7d % 7d %s %s %s\n"
3488 % (
3490 % (
3489 i,
3491 i,
3490 r.start(i),
3492 r.start(i),
3491 r.length(i),
3493 r.length(i),
3492 r.linkrev(i),
3494 r.linkrev(i),
3493 shortfn(node),
3495 shortfn(node),
3494 shortfn(pp[0]),
3496 shortfn(pp[0]),
3495 shortfn(pp[1]),
3497 shortfn(pp[1]),
3496 )
3498 )
3497 )
3499 )
3498 else:
3500 else:
3499 ui.write(
3501 ui.write(
3500 b"% 6d % 7d %s %s %s\n"
3502 b"% 6d % 7d %s %s %s\n"
3501 % (
3503 % (
3502 i,
3504 i,
3503 r.linkrev(i),
3505 r.linkrev(i),
3504 shortfn(node),
3506 shortfn(node),
3505 shortfn(pp[0]),
3507 shortfn(pp[0]),
3506 shortfn(pp[1]),
3508 shortfn(pp[1]),
3507 )
3509 )
3508 )
3510 )
3509 elif format == 1:
3511 elif format == 1:
3510 pr = r.parentrevs(i)
3512 pr = r.parentrevs(i)
3511 if ui.verbose:
3513 if ui.verbose:
3512 ui.write(
3514 ui.write(
3513 b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
3515 b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
3514 % (
3516 % (
3515 i,
3517 i,
3516 r.flags(i),
3518 r.flags(i),
3517 r.start(i),
3519 r.start(i),
3518 r.length(i),
3520 r.length(i),
3519 r.rawsize(i),
3521 r.rawsize(i),
3520 r.linkrev(i),
3522 r.linkrev(i),
3521 pr[0],
3523 pr[0],
3522 pr[1],
3524 pr[1],
3523 shortfn(node),
3525 shortfn(node),
3524 )
3526 )
3525 )
3527 )
3526 else:
3528 else:
3527 ui.write(
3529 ui.write(
3528 b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
3530 b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
3529 % (
3531 % (
3530 i,
3532 i,
3531 r.flags(i),
3533 r.flags(i),
3532 r.rawsize(i),
3534 r.rawsize(i),
3533 r.linkrev(i),
3535 r.linkrev(i),
3534 pr[0],
3536 pr[0],
3535 pr[1],
3537 pr[1],
3536 shortfn(node),
3538 shortfn(node),
3537 )
3539 )
3538 )
3540 )
3539
3541
3540
3542
3541 @command(
3543 @command(
3542 b'debugrevspec',
3544 b'debugrevspec',
3543 [
3545 [
3544 (
3546 (
3545 b'',
3547 b'',
3546 b'optimize',
3548 b'optimize',
3547 None,
3549 None,
3548 _(b'print parsed tree after optimizing (DEPRECATED)'),
3550 _(b'print parsed tree after optimizing (DEPRECATED)'),
3549 ),
3551 ),
3550 (
3552 (
3551 b'',
3553 b'',
3552 b'show-revs',
3554 b'show-revs',
3553 True,
3555 True,
3554 _(b'print list of result revisions (default)'),
3556 _(b'print list of result revisions (default)'),
3555 ),
3557 ),
3556 (
3558 (
3557 b's',
3559 b's',
3558 b'show-set',
3560 b'show-set',
3559 None,
3561 None,
3560 _(b'print internal representation of result set'),
3562 _(b'print internal representation of result set'),
3561 ),
3563 ),
3562 (
3564 (
3563 b'p',
3565 b'p',
3564 b'show-stage',
3566 b'show-stage',
3565 [],
3567 [],
3566 _(b'print parsed tree at the given stage'),
3568 _(b'print parsed tree at the given stage'),
3567 _(b'NAME'),
3569 _(b'NAME'),
3568 ),
3570 ),
3569 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3571 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3570 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3572 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3571 ],
3573 ],
3572 b'REVSPEC',
3574 b'REVSPEC',
3573 )
3575 )
3574 def debugrevspec(ui, repo, expr, **opts):
3576 def debugrevspec(ui, repo, expr, **opts):
3575 """parse and apply a revision specification
3577 """parse and apply a revision specification
3576
3578
3577 Use -p/--show-stage option to print the parsed tree at the given stages.
3579 Use -p/--show-stage option to print the parsed tree at the given stages.
3578 Use -p all to print tree at every stage.
3580 Use -p all to print tree at every stage.
3579
3581
3580 Use --no-show-revs option with -s or -p to print only the set
3582 Use --no-show-revs option with -s or -p to print only the set
3581 representation or the parsed tree respectively.
3583 representation or the parsed tree respectively.
3582
3584
3583 Use --verify-optimized to compare the optimized result with the unoptimized
3585 Use --verify-optimized to compare the optimized result with the unoptimized
3584 one. Returns 1 if the optimized result differs.
3586 one. Returns 1 if the optimized result differs.
3585 """
3587 """
3586 opts = pycompat.byteskwargs(opts)
3588 opts = pycompat.byteskwargs(opts)
3587 aliases = ui.configitems(b'revsetalias')
3589 aliases = ui.configitems(b'revsetalias')
3588 stages = [
3590 stages = [
3589 (b'parsed', lambda tree: tree),
3591 (b'parsed', lambda tree: tree),
3590 (
3592 (
3591 b'expanded',
3593 b'expanded',
3592 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3594 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3593 ),
3595 ),
3594 (b'concatenated', revsetlang.foldconcat),
3596 (b'concatenated', revsetlang.foldconcat),
3595 (b'analyzed', revsetlang.analyze),
3597 (b'analyzed', revsetlang.analyze),
3596 (b'optimized', revsetlang.optimize),
3598 (b'optimized', revsetlang.optimize),
3597 ]
3599 ]
3598 if opts[b'no_optimized']:
3600 if opts[b'no_optimized']:
3599 stages = stages[:-1]
3601 stages = stages[:-1]
3600 if opts[b'verify_optimized'] and opts[b'no_optimized']:
3602 if opts[b'verify_optimized'] and opts[b'no_optimized']:
3601 raise error.Abort(
3603 raise error.Abort(
3602 _(b'cannot use --verify-optimized with --no-optimized')
3604 _(b'cannot use --verify-optimized with --no-optimized')
3603 )
3605 )
3604 stagenames = {n for n, f in stages}
3606 stagenames = {n for n, f in stages}
3605
3607
3606 showalways = set()
3608 showalways = set()
3607 showchanged = set()
3609 showchanged = set()
3608 if ui.verbose and not opts[b'show_stage']:
3610 if ui.verbose and not opts[b'show_stage']:
3609 # show parsed tree by --verbose (deprecated)
3611 # show parsed tree by --verbose (deprecated)
3610 showalways.add(b'parsed')
3612 showalways.add(b'parsed')
3611 showchanged.update([b'expanded', b'concatenated'])
3613 showchanged.update([b'expanded', b'concatenated'])
3612 if opts[b'optimize']:
3614 if opts[b'optimize']:
3613 showalways.add(b'optimized')
3615 showalways.add(b'optimized')
3614 if opts[b'show_stage'] and opts[b'optimize']:
3616 if opts[b'show_stage'] and opts[b'optimize']:
3615 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3617 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3616 if opts[b'show_stage'] == [b'all']:
3618 if opts[b'show_stage'] == [b'all']:
3617 showalways.update(stagenames)
3619 showalways.update(stagenames)
3618 else:
3620 else:
3619 for n in opts[b'show_stage']:
3621 for n in opts[b'show_stage']:
3620 if n not in stagenames:
3622 if n not in stagenames:
3621 raise error.Abort(_(b'invalid stage name: %s') % n)
3623 raise error.Abort(_(b'invalid stage name: %s') % n)
3622 showalways.update(opts[b'show_stage'])
3624 showalways.update(opts[b'show_stage'])
3623
3625
3624 treebystage = {}
3626 treebystage = {}
3625 printedtree = None
3627 printedtree = None
3626 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3628 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3627 for n, f in stages:
3629 for n, f in stages:
3628 treebystage[n] = tree = f(tree)
3630 treebystage[n] = tree = f(tree)
3629 if n in showalways or (n in showchanged and tree != printedtree):
3631 if n in showalways or (n in showchanged and tree != printedtree):
3630 if opts[b'show_stage'] or n != b'parsed':
3632 if opts[b'show_stage'] or n != b'parsed':
3631 ui.write(b"* %s:\n" % n)
3633 ui.write(b"* %s:\n" % n)
3632 ui.write(revsetlang.prettyformat(tree), b"\n")
3634 ui.write(revsetlang.prettyformat(tree), b"\n")
3633 printedtree = tree
3635 printedtree = tree
3634
3636
3635 if opts[b'verify_optimized']:
3637 if opts[b'verify_optimized']:
3636 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3638 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3637 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3639 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3638 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3640 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3639 ui.writenoi18n(
3641 ui.writenoi18n(
3640 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3642 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3641 )
3643 )
3642 ui.writenoi18n(
3644 ui.writenoi18n(
3643 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3645 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3644 )
3646 )
3645 arevs = list(arevs)
3647 arevs = list(arevs)
3646 brevs = list(brevs)
3648 brevs = list(brevs)
3647 if arevs == brevs:
3649 if arevs == brevs:
3648 return 0
3650 return 0
3649 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3651 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3650 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3652 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3651 sm = difflib.SequenceMatcher(None, arevs, brevs)
3653 sm = difflib.SequenceMatcher(None, arevs, brevs)
3652 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3654 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3653 if tag in ('delete', 'replace'):
3655 if tag in ('delete', 'replace'):
3654 for c in arevs[alo:ahi]:
3656 for c in arevs[alo:ahi]:
3655 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3657 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3656 if tag in ('insert', 'replace'):
3658 if tag in ('insert', 'replace'):
3657 for c in brevs[blo:bhi]:
3659 for c in brevs[blo:bhi]:
3658 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3660 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3659 if tag == 'equal':
3661 if tag == 'equal':
3660 for c in arevs[alo:ahi]:
3662 for c in arevs[alo:ahi]:
3661 ui.write(b' %d\n' % c)
3663 ui.write(b' %d\n' % c)
3662 return 1
3664 return 1
3663
3665
3664 func = revset.makematcher(tree)
3666 func = revset.makematcher(tree)
3665 revs = func(repo)
3667 revs = func(repo)
3666 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3668 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3667 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3669 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3668 if not opts[b'show_revs']:
3670 if not opts[b'show_revs']:
3669 return
3671 return
3670 for c in revs:
3672 for c in revs:
3671 ui.write(b"%d\n" % c)
3673 ui.write(b"%d\n" % c)
3672
3674
3673
3675
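# Illustrative sketch: the stage pipeline used by debugrevspec above. Each
# stage is a (name, function) pair and the tree produced by one stage feeds
# the next, so printing "at a stage" means printing the intermediate tree
# after that stage has run.
def _run_stages(tree, stages):
    trees_by_stage = {}
    for name, transform in stages:
        tree = transform(tree)
        trees_by_stage[name] = tree
    return trees_by_stage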
3674 @command(
3676 @command(
3675 b'debugserve',
3677 b'debugserve',
3676 [
3678 [
3677 (
3679 (
3678 b'',
3680 b'',
3679 b'sshstdio',
3681 b'sshstdio',
3680 False,
3682 False,
3681 _(b'run an SSH server bound to process handles'),
3683 _(b'run an SSH server bound to process handles'),
3682 ),
3684 ),
3683 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3685 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3684 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3686 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3685 ],
3687 ],
3686 b'',
3688 b'',
3687 )
3689 )
3688 def debugserve(ui, repo, **opts):
3690 def debugserve(ui, repo, **opts):
3689 """run a server with advanced settings
3691 """run a server with advanced settings
3690
3692
3691 This command is similar to :hg:`serve`. It exists partially as a
3693 This command is similar to :hg:`serve`. It exists partially as a
3692 workaround to the fact that ``hg serve --stdio`` must have specific
3694 workaround to the fact that ``hg serve --stdio`` must have specific
3693 arguments for security reasons.
3695 arguments for security reasons.
3694 """
3696 """
3695 opts = pycompat.byteskwargs(opts)
3697 opts = pycompat.byteskwargs(opts)
3696
3698
3697 if not opts[b'sshstdio']:
3699 if not opts[b'sshstdio']:
3698 raise error.Abort(_(b'only --sshstdio is currently supported'))
3700 raise error.Abort(_(b'only --sshstdio is currently supported'))
3699
3701
3700 logfh = None
3702 logfh = None
3701
3703
3702 if opts[b'logiofd'] and opts[b'logiofile']:
3704 if opts[b'logiofd'] and opts[b'logiofile']:
3703 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3705 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3704
3706
3705 if opts[b'logiofd']:
3707 if opts[b'logiofd']:
3706 # Ideally we would be line buffered. But line buffering in binary
3708 # Ideally we would be line buffered. But line buffering in binary
3707 # mode isn't supported and emits a warning in Python 3.8+. Disabling
3709 # mode isn't supported and emits a warning in Python 3.8+. Disabling
3708 # buffering could have performance impacts. But since this isn't
3710 # buffering could have performance impacts. But since this isn't
3709 # performance critical code, it should be fine.
3711 # performance critical code, it should be fine.
3710 try:
3712 try:
3711 logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
3713 logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
3712 except OSError as e:
3714 except OSError as e:
3713 if e.errno != errno.ESPIPE:
3715 if e.errno != errno.ESPIPE:
3714 raise
3716 raise
3715 # can't seek a pipe, so `ab` mode fails on py3
3717 # can't seek a pipe, so `ab` mode fails on py3
3716 logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
3718 logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
3717 elif opts[b'logiofile']:
3719 elif opts[b'logiofile']:
3718 logfh = open(opts[b'logiofile'], b'ab', 0)
3720 logfh = open(opts[b'logiofile'], b'ab', 0)
3719
3721
3720 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3722 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3721 s.serve_forever()
3723 s.serve_forever()
3722
3724
3723
3725
3724 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3726 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3725 def debugsetparents(ui, repo, rev1, rev2=None):
3727 def debugsetparents(ui, repo, rev1, rev2=None):
3726 """manually set the parents of the current working directory (DANGEROUS)
3728 """manually set the parents of the current working directory (DANGEROUS)
3727
3729
3728 This command is not what you are looking for and should not be used. Using
3730 This command is not what you are looking for and should not be used. Using
3729 this command will most certainly results in slight corruption of the file
3731 this command will most certainly results in slight corruption of the file
3730 level histories withing your repository. DO NOT USE THIS COMMAND.
3732 level histories withing your repository. DO NOT USE THIS COMMAND.
3731
3733
3732 The command update the p1 and p2 field in the dirstate, and not touching
3734 The command update the p1 and p2 field in the dirstate, and not touching
3733 anything else. This useful for writing repository conversion tools, but
3735 anything else. This useful for writing repository conversion tools, but
3734 should be used with extreme care. For example, neither the working
3736 should be used with extreme care. For example, neither the working
3735 directory nor the dirstate is updated, so file status may be incorrect
3737 directory nor the dirstate is updated, so file status may be incorrect
3736 after running this command. Only used if you are one of the few people that
3738 after running this command. Only used if you are one of the few people that
3737 deeply unstand both conversion tools and file level histories. If you are
3739 deeply unstand both conversion tools and file level histories. If you are
3738 reading this help, you are not one of this people (most of them sailed west
3740 reading this help, you are not one of this people (most of them sailed west
3739 from Mithlond anyway.
3741 from Mithlond anyway.
3740
3742
3741 So one last time DO NOT USE THIS COMMAND.
3743 So one last time DO NOT USE THIS COMMAND.
3742
3744
3743 Returns 0 on success.
3745 Returns 0 on success.
3744 """
3746 """
3745
3747
3746 node1 = scmutil.revsingle(repo, rev1).node()
3748 node1 = scmutil.revsingle(repo, rev1).node()
3747 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3749 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3748
3750
3749 with repo.wlock():
3751 with repo.wlock():
3750 repo.setparents(node1, node2)
3752 repo.setparents(node1, node2)
3751
3753
3752
3754
3753 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3755 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3754 def debugsidedata(ui, repo, file_, rev=None, **opts):
3756 def debugsidedata(ui, repo, file_, rev=None, **opts):
3755 """dump the side data for a cl/manifest/file revision
3757 """dump the side data for a cl/manifest/file revision
3756
3758
3757 Use --verbose to dump the sidedata content."""
3759 Use --verbose to dump the sidedata content."""
3758 opts = pycompat.byteskwargs(opts)
3760 opts = pycompat.byteskwargs(opts)
3759 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
3761 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
3760 if rev is not None:
3762 if rev is not None:
3761 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3763 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3762 file_, rev = None, file_
3764 file_, rev = None, file_
3763 elif rev is None:
3765 elif rev is None:
3764 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3766 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3765 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
3767 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
3766 r = getattr(r, '_revlog', r)
3768 r = getattr(r, '_revlog', r)
3767 try:
3769 try:
3768 sidedata = r.sidedata(r.lookup(rev))
3770 sidedata = r.sidedata(r.lookup(rev))
3769 except KeyError:
3771 except KeyError:
3770 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3772 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3771 if sidedata:
3773 if sidedata:
3772 sidedata = list(sidedata.items())
3774 sidedata = list(sidedata.items())
3773 sidedata.sort()
3775 sidedata.sort()
3774 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3776 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3775 for key, value in sidedata:
3777 for key, value in sidedata:
3776 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3778 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3777 if ui.verbose:
3779 if ui.verbose:
3778 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3780 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3779
3781
3780
3782
3781 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3783 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3782 def debugssl(ui, repo, source=None, **opts):
3784 def debugssl(ui, repo, source=None, **opts):
3783 """test a secure connection to a server
3785 """test a secure connection to a server
3784
3786
3785 This builds the certificate chain for the server on Windows, installing the
3787 This builds the certificate chain for the server on Windows, installing the
3786 missing intermediates and trusted root via Windows Update if necessary. It
3788 missing intermediates and trusted root via Windows Update if necessary. It
3787 does nothing on other platforms.
3789 does nothing on other platforms.
3788
3790
3789 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3791 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3790 that server is used. See :hg:`help urls` for more information.
3792 that server is used. See :hg:`help urls` for more information.
3791
3793
3792 If the update succeeds, retry the original operation. Otherwise, the cause
3794 If the update succeeds, retry the original operation. Otherwise, the cause
3793 of the SSL error is likely another issue.
3795 of the SSL error is likely another issue.
3794 """
3796 """
3795 if not pycompat.iswindows:
3797 if not pycompat.iswindows:
3796 raise error.Abort(
3798 raise error.Abort(
3797 _(b'certificate chain building is only possible on Windows')
3799 _(b'certificate chain building is only possible on Windows')
3798 )
3800 )
3799
3801
3800 if not source:
3802 if not source:
3801 if not repo:
3803 if not repo:
3802 raise error.Abort(
3804 raise error.Abort(
3803 _(
3805 _(
3804 b"there is no Mercurial repository here, and no "
3806 b"there is no Mercurial repository here, and no "
3805 b"server specified"
3807 b"server specified"
3806 )
3808 )
3807 )
3809 )
3808 source = b"default"
3810 source = b"default"
3809
3811
3810 source, branches = urlutil.get_unique_pull_path(
3812 source, branches = urlutil.get_unique_pull_path(
3811 b'debugssl', repo, ui, source
3813 b'debugssl', repo, ui, source
3812 )
3814 )
3813 url = urlutil.url(source)
3815 url = urlutil.url(source)
3814
3816
3815 defaultport = {b'https': 443, b'ssh': 22}
3817 defaultport = {b'https': 443, b'ssh': 22}
3816 if url.scheme in defaultport:
3818 if url.scheme in defaultport:
3817 try:
3819 try:
3818 addr = (url.host, int(url.port or defaultport[url.scheme]))
3820 addr = (url.host, int(url.port or defaultport[url.scheme]))
3819 except ValueError:
3821 except ValueError:
3820 raise error.Abort(_(b"malformed port number in URL"))
3822 raise error.Abort(_(b"malformed port number in URL"))
3821 else:
3823 else:
3822 raise error.Abort(_(b"only https and ssh connections are supported"))
3824 raise error.Abort(_(b"only https and ssh connections are supported"))
3823
3825
3824 from . import win32
3826 from . import win32
3825
3827
3826 s = ssl.wrap_socket(
3828 s = ssl.wrap_socket(
3827 socket.socket(),
3829 socket.socket(),
3828 ssl_version=ssl.PROTOCOL_TLS,
3830 ssl_version=ssl.PROTOCOL_TLS,
3829 cert_reqs=ssl.CERT_NONE,
3831 cert_reqs=ssl.CERT_NONE,
3830 ca_certs=None,
3832 ca_certs=None,
3831 )
3833 )
3832
3834
3833 try:
3835 try:
3834 s.connect(addr)
3836 s.connect(addr)
3835 cert = s.getpeercert(True)
3837 cert = s.getpeercert(True)
3836
3838
3837 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3839 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3838
3840
3839 complete = win32.checkcertificatechain(cert, build=False)
3841 complete = win32.checkcertificatechain(cert, build=False)
3840
3842
3841 if not complete:
3843 if not complete:
3842 ui.status(_(b'certificate chain is incomplete, updating... '))
3844 ui.status(_(b'certificate chain is incomplete, updating... '))
3843
3845
3844 if not win32.checkcertificatechain(cert):
3846 if not win32.checkcertificatechain(cert):
3845 ui.status(_(b'failed.\n'))
3847 ui.status(_(b'failed.\n'))
3846 else:
3848 else:
3847 ui.status(_(b'done.\n'))
3849 ui.status(_(b'done.\n'))
3848 else:
3850 else:
3849 ui.status(_(b'full certificate chain is available\n'))
3851 ui.status(_(b'full certificate chain is available\n'))
3850 finally:
3852 finally:
3851 s.close()
3853 s.close()
3852
3854
3853
3855
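# Illustrative sketch (hypothetical helper, not Mercurial API): the scheme to
# default-port fallback that debugssl above applies when the URL carries no
# explicit port. Anything other than https or ssh is rejected.
def _resolve_ssl_port(scheme, port=None):
    defaultport = {b'https': 443, b'ssh': 22}
    if scheme not in defaultport:
        raise ValueError("only https and ssh connections are supported")
    return int(port or defaultport[scheme])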
@command(
    b"debugbackupbundle",
    [
        (
            b"",
            b"recover",
            b"",
            b"brings the specified changeset back into the repository",
        )
    ]
    + cmdutil.logopts,
    _(b"hg debugbackupbundle [--recover HASH]"),
)
def debugbackupbundle(ui, repo, *pats, **opts):
    """lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in each
    backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for that
    backup.
    """
    backups = list(
        filter(
            os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
        )
    )
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    opts = pycompat.byteskwargs(opts)
    opts[b"bundle"] = b""
    opts[b"force"] = None
    limit = logcmdutil.getlimit(opts)

    def display(other, chlist, displayer):
        if opts.get(b"newest_first"):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                True for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b"no_merges") and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get(b"recover")
    if recovernode:
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_(b"%s already exists in the repo\n") % recovernode)
            return
    elif backups:
        msg = _(
            b"Recover changesets using: hg debugbackupbundle --recover "
            b"<changeset hash>\n\nAvailable backup changesets:"
        )
        ui.status(msg, label=b"status.removed")
    else:
        ui.status(_(b"no backup changesets found\n"))
        return

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = os.path.relpath(backup, encoding.getcwd())
        source, branches = urlutil.get_unique_pull_path(
            b'debugbackupbundle',
            repo,
            ui,
            source,
            default_branches=opts.get(b'branch'),
        )
        try:
            other = hg.peer(repo, opts, source)
        except error.LookupError as ex:
            msg = _(b"\nwarning: unable to open bundle %s") % source
            hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg, hint=hint)
            continue
        revs, checkout = hg.addbranchrevs(
            repo, other, branches, opts.get(b"rev")
        )

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        with ui.silent():
            try:
                other, chlist, cleanupfn = bundlerepo.getremotechanges(
                    ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
                )
            except error.LookupError:
                continue

        try:
            if not chlist:
                continue
            if recovernode:
                with repo.lock(), repo.transaction(b"unbundle") as tr:
                    if scmutil.isrevsymbol(other, recovernode):
                        ui.status(_(b"Unbundling %s\n") % (recovernode))
                        f = hg.openpath(ui, source)
                        gen = exchange.readbundle(ui, f, source)
                        if isinstance(gen, bundle2.unbundle20):
                            bundle2.applybundle(
                                repo,
                                gen,
                                tr,
                                source=b"unbundle",
                                url=b"bundle:" + source,
                            )
                        else:
                            gen.apply(repo, b"unbundle", b"bundle:" + source)
                        break
            else:
                backupdate = encoding.strtolocal(
                    time.strftime(
                        "%a %H:%M, %Y-%m-%d",
                        time.localtime(os.path.getmtime(source)),
                    )
                )
                ui.status(b"\n%s\n" % (backupdate.ljust(50)))
                if ui.verbose:
                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
                else:
                    opts[
                        b"template"
                    ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
                    displayer = logcmdutil.changesetdisplayer(
                        ui, other, opts, False
                    )
                    display(other, chlist, displayer)
                    displayer.close()
        finally:
            cleanupfn()


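# A hedged usage sketch (not part of upstream Mercurial): the debugbackupbundle
# command above could be exercised roughly like this, where ABCDEF0123 stands
# in for a real changeset hash taken from the listing:
#
#   $ hg debugbackupbundle                       # list changesets per backup bundle
#   $ hg debugbackupbundle --verbose             # also show bundle paths and full messages
#   $ hg debugbackupbundle --recover ABCDEF0123  # unbundle the first bundle containing it
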
@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    ctx = scmutil.revsingle(repo, rev, None)
    for k, v in sorted(ctx.substate.items()):
        ui.writenoi18n(b'path %s\n' % k)
        ui.writenoi18n(b' source %s\n' % v[0])
        ui.writenoi18n(b' revision %s\n' % v[1])


@command(b'debugshell', optionalrepo=True)
def debugshell(ui, repo):
    """run an interactive Python interpreter

    The local namespace is provided with a reference to the ui and
    the repo instance (if available).
    """
    import code

    imported_objects = {
        'ui': ui,
        'repo': repo,
    }

    code.interact(local=imported_objects)


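# Illustrative session (sketch): `hg debugshell` drops into a standard
# code.interact() prompt with `ui` and `repo` pre-bound, so repository state
# can be inspected directly; the comments below describe the kind of values
# returned, not literal output:
#
#   $ hg debugshell
#   >>> len(repo)      # number of revisions in the repository
#   >>> repo.root      # repository root path
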
@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only, unless the closest
    successors sets are requested (``--closest``).

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors is called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    ctx2str = bytes
    node2str = short
    for rev in logcmdutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % ctx2str(ctx))
        for succsset in obsutil.successorssets(
            repo, ctx.node(), closest=opts['closest'], cache=cache
        ):
            if succsset:
                ui.write(b'    ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(b' ')
                    ui.write(node2str(node))
            ui.write(b'\n')


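# Reading the output (sketch): if changeset A was amended into A', then
# `hg debugsuccessorssets A` prints A on one line followed by a single
# indented line containing A'; when A has divergent rewrites, there is one
# indented line per successors set, as described in the docstring above.
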
@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    flog = repo.file(b'.hgtags')
    for r in repo:
        node = repo[r].node()
        tagsnode = cache.getfnode(node, computemissing=False)
        if tagsnode:
            tagsnodedisplay = hex(tagsnode)
            if not flog.hasnode(tagsnode):
                tagsnodedisplay += b' (unknown node)'
        elif tagsnode is None:
            tagsnodedisplay = b'missing'
        else:
            tagsnodedisplay = b'invalid'

        ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))


@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = logcmdutil.revrange(repo, opts['rev'])

    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split(b'=', 1))
            if not k or k == b'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_(b'malformed keyword definition: %s') % d)

    if ui.verbose:
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
            )

    if revs is None:
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()


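# Example invocations (sketch, assuming a repository with at least one commit;
# output shapes are illustrative rather than exact):
#
#   $ hg debugtemplate -r . '{rev}:{node|short} {desc|firstline}\n'
#   $ hg debugtemplate -D greeting=hello '{greeting}\n'
#   $ hg debugtemplate --verbose '{word(0, "hello world")}\n'   # also prints the parsed tree
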
@command(
    b'debuguigetpass',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    if r is None:
        r = b"<default response>"
    ui.writenoi18n(b'response: %s\n' % r)


@command(
    b'debuguiprompt',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    r = ui.prompt(prompt)
    ui.writenoi18n(b'response: %s\n' % r)


@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    with repo.wlock(), repo.lock():
        repo.updatecaches(caches=repository.CACHES_ALL)


@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
        (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlogs will be upgraded. You can restrict this using flags
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlogs but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    * `--filelogs`: optimize the filelogs only
    * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
    """
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=set(optimize), backup=backup, **opts
    )


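# Typical flow (sketch): run without ``--run`` first to see what would change,
# then perform the upgrade, optionally limiting which revlogs are touched:
#
#   $ hg debugupgraderepo                        # report problems/optimizations only
#   $ hg debugupgraderepo --run                  # actually upgrade (keeps a backup by default)
#   $ hg debugupgraderepo --run --no-manifest    # leave the manifest revlog alone
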
@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    f = lambda fn: fn
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        f = lambda fn: util.normpath(fn)
    fmt = b'f %%-%ds %%-%ds %%s' % (
        max([len(abs) for abs in items]),
        max([len(repo.pathto(abs)) for abs in items]),
    )
    for abs in items:
        line = fmt % (
            abs,
            f(repo.pathto(abs)),
            m.exact(abs) and b'exact' or b'',
        )
        ui.write(b"%s\n" % line.rstrip())


@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        dnodes = b''
        if entry.get(b'divergentnodes'):
            dnodes = (
                b' '.join(
                    b'%s (%s)' % (ctx.hex(), ctx.phasestr())
                    for ctx in entry[b'divergentnodes']
                )
                + b' '
            )
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )


@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    try:
        for opt in cmdutil.remoteopts:
            del opts[opt[1]]
        args = {}
        for k, v in opts.items():
            if v:
                args[k] = v
        args = pycompat.strkwargs(args)
        # run twice to check that we don't mess up the stream for the next command
        res1 = repo.debugwireargs(*vals, **args)
        res2 = repo.debugwireargs(*vals, **args)
        ui.write(b"%s\n" % res1)
        if res1 != res2:
            ui.warn(b"%s\n" % res2)
    finally:
        repo.close()


def _parsewirelangblocks(fh):
    activeaction = None
    blocklines = []
    lastindent = 0

    for line in fh:
        line = line.rstrip()
        if not line:
            continue

        if line.startswith(b'#'):
            continue

        if not line.startswith(b' '):
            # New block. Flush previous one.
            if activeaction:
                yield activeaction, blocklines

            activeaction = line
            blocklines = []
            lastindent = 0
            continue

        # Else we start with an indent.

        if not activeaction:
            raise error.Abort(_(b'indented line outside of block'))

        indent = len(line) - len(line.lstrip())

        # If this line is indented more than the last line, concatenate it.
        if indent > lastindent and blocklines:
            blocklines[-1] += line.lstrip()
        else:
            blocklines.append(line)
            lastindent = indent

    # Flush last block.
    if activeaction:
        yield activeaction, blocklines


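# Example (sketch) of what the parser above yields: given this input text
#
#   # comment lines and blank lines are skipped
#   command listkeys
#       namespace bookmarks
#   close
#
# it produces two (action, lines) tuples, with the indentation of argument
# lines preserved:
#   (b'command listkeys', [b'    namespace bookmarks'])
#   (b'close', [])
# A line indented deeper than the previous one is concatenated onto that
# previous entry instead of starting a new one.
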
@command(
    b'debugwireproto',
    [
        (b'', b'localssh', False, _(b'start an SSH server for this repo')),
        (b'', b'peer', b'', _(b'construct a specific version of the peer')),
        (
            b'',
            b'noreadstderr',
            False,
            _(b'do not read from stderr of the remote'),
        ),
        (
            b'',
            b'nologhandshake',
            False,
            _(b'do not log I/O related to the peer handshake'),
        ),
    ]
    + cmdutil.remoteopts,
    _(b'[PATH]'),
    optionalrepo=True,
)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``ssh1``. ``raw`` instances only allow sending raw data payloads and
    don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is specified via stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.

    raw+
    ----

    Behaves like ``raw`` except flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

      command listkeys
          namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` N bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` N bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit *Unified Frame-Based Protocol* frames by using
    special syntax.

    A frame is composed of a type, flags, and payload. These can be parsed
    from a string of the form:

      <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
    """
4579 opts = pycompat.byteskwargs(opts)
4581 opts = pycompat.byteskwargs(opts)
4580
4582
4581 if opts[b'localssh'] and not repo:
4583 if opts[b'localssh'] and not repo:
4582 raise error.Abort(_(b'--localssh requires a repository'))
4584 raise error.Abort(_(b'--localssh requires a repository'))
4583
4585
4584 if opts[b'peer'] and opts[b'peer'] not in (
4586 if opts[b'peer'] and opts[b'peer'] not in (
4585 b'raw',
4587 b'raw',
4586 b'ssh1',
4588 b'ssh1',
4587 ):
4589 ):
4588 raise error.Abort(
4590 raise error.Abort(
4589 _(b'invalid value for --peer'),
4591 _(b'invalid value for --peer'),
4590 hint=_(b'valid values are "raw" and "ssh1"'),
4592 hint=_(b'valid values are "raw" and "ssh1"'),
4591 )
4593 )
4592
4594
4593 if path and opts[b'localssh']:
4595 if path and opts[b'localssh']:
4594 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4596 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4595
4597
4596 if ui.interactive():
4598 if ui.interactive():
4597 ui.write(_(b'(waiting for commands on stdin)\n'))
4599 ui.write(_(b'(waiting for commands on stdin)\n'))
4598
4600
4599 blocks = list(_parsewirelangblocks(ui.fin))
4601 blocks = list(_parsewirelangblocks(ui.fin))
4600
4602
4601 proc = None
4603 proc = None
4602 stdin = None
4604 stdin = None
4603 stdout = None
4605 stdout = None
4604 stderr = None
4606 stderr = None
4605 opener = None
4607 opener = None
4606
4608
4607 if opts[b'localssh']:
4609 if opts[b'localssh']:
4608 # We start the SSH server in its own process so there is process
4610 # We start the SSH server in its own process so there is process
4609 # separation. This prevents a whole class of potential bugs around
4611 # separation. This prevents a whole class of potential bugs around
4610 # shared state from interfering with server operation.
4612 # shared state from interfering with server operation.
4611 args = procutil.hgcmd() + [
4613 args = procutil.hgcmd() + [
4612 b'-R',
4614 b'-R',
4613 repo.root,
4615 repo.root,
4614 b'debugserve',
4616 b'debugserve',
4615 b'--sshstdio',
4617 b'--sshstdio',
4616 ]
4618 ]
4617 proc = subprocess.Popen(
4619 proc = subprocess.Popen(
4618 pycompat.rapply(procutil.tonativestr, args),
4620 pycompat.rapply(procutil.tonativestr, args),
4619 stdin=subprocess.PIPE,
4621 stdin=subprocess.PIPE,
4620 stdout=subprocess.PIPE,
4622 stdout=subprocess.PIPE,
4621 stderr=subprocess.PIPE,
4623 stderr=subprocess.PIPE,
4622 bufsize=0,
4624 bufsize=0,
4623 )
4625 )
4624
4626
4625 stdin = proc.stdin
4627 stdin = proc.stdin
4626 stdout = proc.stdout
4628 stdout = proc.stdout
4627 stderr = proc.stderr
4629 stderr = proc.stderr
4628
4630
4629 # We turn the pipes into observers so we can log I/O.
4631 # We turn the pipes into observers so we can log I/O.
4630 if ui.verbose or opts[b'peer'] == b'raw':
4632 if ui.verbose or opts[b'peer'] == b'raw':
4631 stdin = util.makeloggingfileobject(
4633 stdin = util.makeloggingfileobject(
4632 ui, proc.stdin, b'i', logdata=True
4634 ui, proc.stdin, b'i', logdata=True
4633 )
4635 )
4634 stdout = util.makeloggingfileobject(
4636 stdout = util.makeloggingfileobject(
4635 ui, proc.stdout, b'o', logdata=True
4637 ui, proc.stdout, b'o', logdata=True
4636 )
4638 )
4637 stderr = util.makeloggingfileobject(
4639 stderr = util.makeloggingfileobject(
4638 ui, proc.stderr, b'e', logdata=True
4640 ui, proc.stderr, b'e', logdata=True
4639 )
4641 )
4640
4642
4641 # --localssh also implies the peer connection settings.
4643 # --localssh also implies the peer connection settings.
4642
4644
4643 url = b'ssh://localserver'
4645 url = b'ssh://localserver'
4644 autoreadstderr = not opts[b'noreadstderr']
4646 autoreadstderr = not opts[b'noreadstderr']
4645
4647
4646 if opts[b'peer'] == b'ssh1':
4648 if opts[b'peer'] == b'ssh1':
4647 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4649 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4648 peer = sshpeer.sshv1peer(
4650 peer = sshpeer.sshv1peer(
4649 ui,
4651 ui,
4650 url,
4652 url,
4651 proc,
4653 proc,
4652 stdin,
4654 stdin,
4653 stdout,
4655 stdout,
4654 stderr,
4656 stderr,
4655 None,
4657 None,
4656 autoreadstderr=autoreadstderr,
4658 autoreadstderr=autoreadstderr,
4657 )
4659 )
4658 elif opts[b'peer'] == b'raw':
4660 elif opts[b'peer'] == b'raw':
4659 ui.write(_(b'using raw connection to peer\n'))
4661 ui.write(_(b'using raw connection to peer\n'))
4660 peer = None
4662 peer = None
4661 else:
4663 else:
4662 ui.write(_(b'creating ssh peer from handshake results\n'))
4664 ui.write(_(b'creating ssh peer from handshake results\n'))
4663 peer = sshpeer.makepeer(
4665 peer = sshpeer.makepeer(
4664 ui,
4666 ui,
4665 url,
4667 url,
4666 proc,
4668 proc,
4667 stdin,
4669 stdin,
4668 stdout,
4670 stdout,
4669 stderr,
4671 stderr,
4670 autoreadstderr=autoreadstderr,
4672 autoreadstderr=autoreadstderr,
4671 )
4673 )
4672
4674
4673 elif path:
4675 elif path:
4674 # We bypass hg.peer() so we can proxy the sockets.
4676 # We bypass hg.peer() so we can proxy the sockets.
4675 # TODO consider not doing this because we skip
4677 # TODO consider not doing this because we skip
4676 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4678 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4677 u = urlutil.url(path)
4679 u = urlutil.url(path)
4678 if u.scheme != b'http':
4680 if u.scheme != b'http':
4679 raise error.Abort(_(b'only http:// paths are currently supported'))
4681 raise error.Abort(_(b'only http:// paths are currently supported'))
4680
4682
4681 url, authinfo = u.authinfo()
4683 url, authinfo = u.authinfo()
4682 openerargs = {
4684 openerargs = {
4683 'useragent': b'Mercurial debugwireproto',
4685 'useragent': b'Mercurial debugwireproto',
4684 }
4686 }
4685
4687
4686 # Turn pipes/sockets into observers so we can log I/O.
4688 # Turn pipes/sockets into observers so we can log I/O.
4687 if ui.verbose:
4689 if ui.verbose:
4688 openerargs.update(
4690 openerargs.update(
4689 {
4691 {
4690 'loggingfh': ui,
4692 'loggingfh': ui,
4691 'loggingname': b's',
4693 'loggingname': b's',
4692 'loggingopts': {
4694 'loggingopts': {
4693 'logdata': True,
4695 'logdata': True,
4694 'logdataapis': False,
4696 'logdataapis': False,
4695 },
4697 },
4696 }
4698 }
4697 )
4699 )
4698
4700
4699 if ui.debugflag:
4701 if ui.debugflag:
4700 openerargs['loggingopts']['logdataapis'] = True
4702 openerargs['loggingopts']['logdataapis'] = True
4701
4703
4702 # Don't send default headers when in raw mode. This allows us to
4704 # Don't send default headers when in raw mode. This allows us to
4703 # bypass most of the behavior of our URL handling code so we can
4705 # bypass most of the behavior of our URL handling code so we can
4704 # have near complete control over what's sent on the wire.
4706 # have near complete control over what's sent on the wire.
4705 if opts[b'peer'] == b'raw':
4707 if opts[b'peer'] == b'raw':
4706 openerargs['sendaccept'] = False
4708 openerargs['sendaccept'] = False
4707
4709
4708 opener = urlmod.opener(ui, authinfo, **openerargs)
4710 opener = urlmod.opener(ui, authinfo, **openerargs)
4709
4711
4710 if opts[b'peer'] == b'raw':
4712 if opts[b'peer'] == b'raw':
4711 ui.write(_(b'using raw connection to peer\n'))
4713 ui.write(_(b'using raw connection to peer\n'))
4712 peer = None
4714 peer = None
4713 elif opts[b'peer']:
4715 elif opts[b'peer']:
4714 raise error.Abort(
4716 raise error.Abort(
4715 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4717 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4716 )
4718 )
4717 else:
4719 else:
4718 peer = httppeer.makepeer(ui, path, opener=opener)
4720 peer = httppeer.makepeer(ui, path, opener=opener)
4719
4721
4720 # We /could/ populate stdin/stdout with sock.makefile()...
4722 # We /could/ populate stdin/stdout with sock.makefile()...
4721 else:
4723 else:
4722 raise error.Abort(_(b'unsupported connection configuration'))
4724 raise error.Abort(_(b'unsupported connection configuration'))
4723
4725
4724 batchedcommands = None
4726 batchedcommands = None
4725
4727
4726 # Now perform actions based on the parsed wire language instructions.
4728 # Now perform actions based on the parsed wire language instructions.
4727 for action, lines in blocks:
4729 for action, lines in blocks:
4728 if action in (b'raw', b'raw+'):
4730 if action in (b'raw', b'raw+'):
4729 if not stdin:
4731 if not stdin:
4730 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4732 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4731
4733
4732 # Concatenate the data together.
4734 # Concatenate the data together.
4733 data = b''.join(l.lstrip() for l in lines)
4735 data = b''.join(l.lstrip() for l in lines)
4734 data = stringutil.unescapestr(data)
4736 data = stringutil.unescapestr(data)
4735 stdin.write(data)
4737 stdin.write(data)
4736
4738
4737 if action == b'raw+':
4739 if action == b'raw+':
4738 stdin.flush()
4740 stdin.flush()
4739 elif action == b'flush':
4741 elif action == b'flush':
4740 if not stdin:
4742 if not stdin:
4741 raise error.Abort(_(b'cannot call flush on this peer'))
4743 raise error.Abort(_(b'cannot call flush on this peer'))
4742 stdin.flush()
4744 stdin.flush()
4743 elif action.startswith(b'command'):
4745 elif action.startswith(b'command'):
4744 if not peer:
4746 if not peer:
4745 raise error.Abort(
4747 raise error.Abort(
4746 _(
4748 _(
4747 b'cannot send commands unless peer instance '
4749 b'cannot send commands unless peer instance '
4748 b'is available'
4750 b'is available'
4749 )
4751 )
4750 )
4752 )
4751
4753
4752 command = action.split(b' ', 1)[1]
4754 command = action.split(b' ', 1)[1]
4753
4755
4754 args = {}
4756 args = {}
4755 for line in lines:
4757 for line in lines:
4756 # We need to allow empty values.
4758 # We need to allow empty values.
4757 fields = line.lstrip().split(b' ', 1)
4759 fields = line.lstrip().split(b' ', 1)
4758 if len(fields) == 1:
4760 if len(fields) == 1:
4759 key = fields[0]
4761 key = fields[0]
4760 value = b''
4762 value = b''
4761 else:
4763 else:
4762 key, value = fields
4764 key, value = fields
4763
4765
4764 if value.startswith(b'eval:'):
4766 if value.startswith(b'eval:'):
4765 value = stringutil.evalpythonliteral(value[5:])
4767 value = stringutil.evalpythonliteral(value[5:])
4766 else:
4768 else:
4767 value = stringutil.unescapestr(value)
4769 value = stringutil.unescapestr(value)
4768
4770
4769 args[key] = value
4771 args[key] = value
4770
4772
4771 if batchedcommands is not None:
4773 if batchedcommands is not None:
4772 batchedcommands.append((command, args))
4774 batchedcommands.append((command, args))
4773 continue
4775 continue
4774
4776
4775 ui.status(_(b'sending %s command\n') % command)
4777 ui.status(_(b'sending %s command\n') % command)
4776
4778
4777 if b'PUSHFILE' in args:
4779 if b'PUSHFILE' in args:
4778 with open(args[b'PUSHFILE'], 'rb') as fh:
4780 with open(args[b'PUSHFILE'], 'rb') as fh:
4779 del args[b'PUSHFILE']
4781 del args[b'PUSHFILE']
4780 res, output = peer._callpush(
4782 res, output = peer._callpush(
4781 command, fh, **pycompat.strkwargs(args)
4783 command, fh, **pycompat.strkwargs(args)
4782 )
4784 )
4783 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4785 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4784 ui.status(
4786 ui.status(
4785 _(b'remote output: %s\n') % stringutil.escapestr(output)
4787 _(b'remote output: %s\n') % stringutil.escapestr(output)
4786 )
4788 )
4787 else:
4789 else:
4788 with peer.commandexecutor() as e:
4790 with peer.commandexecutor() as e:
4789 res = e.callcommand(command, args).result()
4791 res = e.callcommand(command, args).result()
4790
4792
4791 ui.status(
4793 ui.status(
4792 _(b'response: %s\n')
4794 _(b'response: %s\n')
4793 % stringutil.pprint(res, bprefix=True, indent=2)
4795 % stringutil.pprint(res, bprefix=True, indent=2)
4794 )
4796 )
4795
4797
4796 elif action == b'batchbegin':
4798 elif action == b'batchbegin':
4797 if batchedcommands is not None:
4799 if batchedcommands is not None:
4798 raise error.Abort(_(b'nested batchbegin not allowed'))
4800 raise error.Abort(_(b'nested batchbegin not allowed'))
4799
4801
4800 batchedcommands = []
4802 batchedcommands = []
4801 elif action == b'batchsubmit':
4803 elif action == b'batchsubmit':
4802 # There is a batching API we could go through. But it would be
4804 # There is a batching API we could go through. But it would be
4803 # difficult to normalize requests into function calls. It is easier
4805 # difficult to normalize requests into function calls. It is easier
4804 # to bypass this layer and normalize to commands + args.
4806 # to bypass this layer and normalize to commands + args.
4805 ui.status(
4807 ui.status(
4806 _(b'sending batch with %d sub-commands\n')
4808 _(b'sending batch with %d sub-commands\n')
4807 % len(batchedcommands)
4809 % len(batchedcommands)
4808 )
4810 )
4809 assert peer is not None
4811 assert peer is not None
4810 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4812 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4811 ui.status(
4813 ui.status(
4812 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4814 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4813 )
4815 )
4814
4816
4815 batchedcommands = None
4817 batchedcommands = None
4816
4818
4817 elif action.startswith(b'httprequest '):
4819 elif action.startswith(b'httprequest '):
4818 if not opener:
4820 if not opener:
4819 raise error.Abort(
4821 raise error.Abort(
4820 _(b'cannot use httprequest without an HTTP peer')
4822 _(b'cannot use httprequest without an HTTP peer')
4821 )
4823 )
4822
4824
4823 request = action.split(b' ', 2)
4825 request = action.split(b' ', 2)
4824 if len(request) != 3:
4826 if len(request) != 3:
4825 raise error.Abort(
4827 raise error.Abort(
4826 _(
4828 _(
4827 b'invalid httprequest: expected format is '
4829 b'invalid httprequest: expected format is '
4828 b'"httprequest <method> <path>"'
4830 b'"httprequest <method> <path>"'
4829 )
4831 )
4830 )
4832 )
4831
4833
4832 method, httppath = request[1:]
4834 method, httppath = request[1:]
4833 headers = {}
4835 headers = {}
4834 body = None
4836 body = None
4835 frames = []
4837 frames = []
4836 for line in lines:
4838 for line in lines:
4837 line = line.lstrip()
4839 line = line.lstrip()
4838 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4840 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4839 if m:
4841 if m:
4840 # Headers need to use native strings.
4842 # Headers need to use native strings.
4841 key = pycompat.strurl(m.group(1))
4843 key = pycompat.strurl(m.group(1))
4842 value = pycompat.strurl(m.group(2))
4844 value = pycompat.strurl(m.group(2))
4843 headers[key] = value
4845 headers[key] = value
4844 continue
4846 continue
4845
4847
4846 if line.startswith(b'BODYFILE '):
4848 if line.startswith(b'BODYFILE '):
4847 with open(line.split(b' ', 1)[1], b'rb') as fh:
4849 with open(line.split(b' ', 1)[1], b'rb') as fh:
4848 body = fh.read()
4850 body = fh.read()
4849 elif line.startswith(b'frame '):
4851 elif line.startswith(b'frame '):
4850 frame = wireprotoframing.makeframefromhumanstring(
4852 frame = wireprotoframing.makeframefromhumanstring(
4851 line[len(b'frame ') :]
4853 line[len(b'frame ') :]
4852 )
4854 )
4853
4855
4854 frames.append(frame)
4856 frames.append(frame)
4855 else:
4857 else:
4856 raise error.Abort(
4858 raise error.Abort(
4857 _(b'unknown argument to httprequest: %s') % line
4859 _(b'unknown argument to httprequest: %s') % line
4858 )
4860 )
4859
4861
4860 url = path + httppath
4862 url = path + httppath
4861
4863
4862 if frames:
4864 if frames:
4863 body = b''.join(bytes(f) for f in frames)
4865 body = b''.join(bytes(f) for f in frames)
4864
4866
4865 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4867 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4866
4868
4867 # urllib.Request insists on using has_data() as a proxy for
4869 # urllib.Request insists on using has_data() as a proxy for
4868 # determining the request method. Override that to use our
4870 # determining the request method. Override that to use our
4869 # explicitly requested method.
4871 # explicitly requested method.
4870 req.get_method = lambda: pycompat.sysstr(method)
4872 req.get_method = lambda: pycompat.sysstr(method)
4871
4873
4872 try:
4874 try:
4873 res = opener.open(req)
4875 res = opener.open(req)
4874 body = res.read()
4876 body = res.read()
4875 except util.urlerr.urlerror as e:
4877 except util.urlerr.urlerror as e:
4876 # read() method must be called, but only exists in Python 2
4878 # read() method must be called, but only exists in Python 2
4877 getattr(e, 'read', lambda: None)()
4879 getattr(e, 'read', lambda: None)()
4878 continue
4880 continue
4879
4881
4880 ct = res.headers.get('Content-Type')
4882 ct = res.headers.get('Content-Type')
4881 if ct == 'application/mercurial-cbor':
4883 if ct == 'application/mercurial-cbor':
4882 ui.write(
4884 ui.write(
4883 _(b'cbor> %s\n')
4885 _(b'cbor> %s\n')
4884 % stringutil.pprint(
4886 % stringutil.pprint(
4885 cborutil.decodeall(body), bprefix=True, indent=2
4887 cborutil.decodeall(body), bprefix=True, indent=2
4886 )
4888 )
4887 )
4889 )
4888
4890
4889 elif action == b'close':
4891 elif action == b'close':
4890 assert peer is not None
4892 assert peer is not None
4891 peer.close()
4893 peer.close()
4892 elif action == b'readavailable':
4894 elif action == b'readavailable':
4893 if not stdout or not stderr:
4895 if not stdout or not stderr:
4894 raise error.Abort(
4896 raise error.Abort(
4895 _(b'readavailable not available on this peer')
4897 _(b'readavailable not available on this peer')
4896 )
4898 )
4897
4899
4898 stdin.close()
4900 stdin.close()
4899 stdout.read()
4901 stdout.read()
4900 stderr.read()
4902 stderr.read()
4901
4903
4902 elif action == b'readline':
4904 elif action == b'readline':
4903 if not stdout:
4905 if not stdout:
4904 raise error.Abort(_(b'readline not available on this peer'))
4906 raise error.Abort(_(b'readline not available on this peer'))
4905 stdout.readline()
4907 stdout.readline()
4906 elif action == b'ereadline':
4908 elif action == b'ereadline':
4907 if not stderr:
4909 if not stderr:
4908 raise error.Abort(_(b'ereadline not available on this peer'))
4910 raise error.Abort(_(b'ereadline not available on this peer'))
4909 stderr.readline()
4911 stderr.readline()
4910 elif action.startswith(b'read '):
4912 elif action.startswith(b'read '):
4911 count = int(action.split(b' ', 1)[1])
4913 count = int(action.split(b' ', 1)[1])
4912 if not stdout:
4914 if not stdout:
4913 raise error.Abort(_(b'read not available on this peer'))
4915 raise error.Abort(_(b'read not available on this peer'))
4914 stdout.read(count)
4916 stdout.read(count)
4915 elif action.startswith(b'eread '):
4917 elif action.startswith(b'eread '):
4916 count = int(action.split(b' ', 1)[1])
4918 count = int(action.split(b' ', 1)[1])
4917 if not stderr:
4919 if not stderr:
4918 raise error.Abort(_(b'eread not available on this peer'))
4920 raise error.Abort(_(b'eread not available on this peer'))
4919 stderr.read(count)
4921 stderr.read(count)
4920 else:
4922 else:
4921 raise error.Abort(_(b'unknown action: %s') % action)
4923 raise error.Abort(_(b'unknown action: %s') % action)
4922
4924
4923 if batchedcommands is not None:
4925 if batchedcommands is not None:
4924 raise error.Abort(_(b'unclosed "batchbegin" request'))
4926 raise error.Abort(_(b'unclosed "batchbegin" request'))
4925
4927
4926 if peer:
4928 if peer:
4927 peer.close()
4929 peer.close()
4928
4930
4929 if proc:
4931 if proc:
4930 proc.kill()
4932 proc.kill()
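
For readers skimming the `httprequest` branch above, the following standalone sketch mirrors its per-line parsing (header lines, `BODYFILE <path>`, and `frame <spec>`) without any Mercurial imports. The function name `parse_httprequest_block` and the sample frame string are invented for this illustration only; real frame specs are handled by `wireprotoframing.makeframefromhumanstring`.

  import re

  def parse_httprequest_block(lines):
      """Split httprequest body lines into (headers, body, frame_specs).

      Mirrors the dispatch above: 'Name: value' lines become headers,
      'BODYFILE <path>' loads the request body from disk, and 'frame <spec>'
      lines are collected for later encoding (stubbed out in this sketch).
      """
      headers = {}
      body = None
      frame_specs = []
      for line in lines:
          line = line.lstrip()
          m = re.match(r'^([a-zA-Z0-9_-]+): (.*)$', line)
          if m:
              headers[m.group(1)] = m.group(2)
              continue
          if line.startswith('BODYFILE '):
              with open(line.split(' ', 1)[1], 'rb') as fh:
                  body = fh.read()
          elif line.startswith('frame '):
              frame_specs.append(line[len('frame '):])
          else:
              raise ValueError('unknown argument to httprequest: %s' % line)
      return headers, body, frame_specs

  # Illustrative input; the frame payload below is a placeholder string only.
  print(parse_httprequest_block([
      'accept: application/mercurial-cbor',
      'frame <human-readable frame spec>',
  ]))
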
@@ -1,2089 +1,2089 b''
1 #require no-reposimplestore
1 #require no-reposimplestore
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > share =
5 > share =
6 > [format]
6 > [format]
7 > # stabilize test across variants
7 > # stabilize test across variants
8 > revlog-compression=zlib
8 > revlog-compression=zlib
9 > [storage]
9 > [storage]
10 > dirstate-v2.slow-path=allow
10 > dirstate-v2.slow-path=allow
11 > EOF
11 > EOF
12
12
13 store and revlogv1 are required in source
13 store and revlogv1 are required in source
14
14
15 $ hg --config format.usestore=false init no-store
15 $ hg --config format.usestore=false init no-store
16 $ hg -R no-store debugupgraderepo
16 $ hg -R no-store debugupgraderepo
17 abort: cannot upgrade repository; requirement missing: store
17 abort: cannot upgrade repository; requirement missing: store
18 [255]
18 [255]
19
19
20 $ hg init no-revlogv1
20 $ hg init no-revlogv1
21 $ cat > no-revlogv1/.hg/requires << EOF
21 $ cat > no-revlogv1/.hg/requires << EOF
22 > dotencode
22 > dotencode
23 > fncache
23 > fncache
24 > generaldelta
24 > generaldelta
25 > store
25 > store
26 > EOF
26 > EOF
27
27
28 $ hg -R no-revlogv1 debugupgraderepo
28 $ hg -R no-revlogv1 debugupgraderepo
29 abort: cannot upgrade repository; missing a revlog version
29 abort: cannot upgrade repository; missing a revlog version
30 [255]
30 [255]
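
Both aborts above boil down to entries missing from the repository's requires file. A minimal pre-flight check, shown below, simply reads that file; `has_upgrade_prereqs` is a hypothetical helper name, and the sketch assumes the plain one-requirement-per-line layout used in the heredoc above (share-safe repositories split requirements across additional files, which this sketch ignores).

  import os

  def has_upgrade_prereqs(repo_path):
      """Return True if .hg/requires lists both 'store' and 'revlogv1'."""
      requires = os.path.join(repo_path, '.hg', 'requires')
      with open(requires) as fh:
          entries = {line.strip() for line in fh if line.strip()}
      # These are the two requirements the aborts above complain about.
      return 'store' in entries and 'revlogv1' in entries

  print(has_upgrade_prereqs('no-revlogv1'))  # False: no revlog version listed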
31
31
32 Cannot upgrade shared repositories
32 Cannot upgrade shared repositories
33
33
34 $ hg init share-parent
34 $ hg init share-parent
35 $ hg -R share-parent debugbuilddag -n .+9
35 $ hg -R share-parent debugbuilddag -n .+9
36 $ hg -R share-parent up tip
36 $ hg -R share-parent up tip
37 10 files updated, 0 files merged, 0 files removed, 0 files unresolved
37 10 files updated, 0 files merged, 0 files removed, 0 files unresolved
38 $ hg -q share share-parent share-child
38 $ hg -q share share-parent share-child
39
39
40 $ hg -R share-child debugupgraderepo --config format.sparse-revlog=no
40 $ hg -R share-child debugupgraderepo --config format.sparse-revlog=no
41 abort: cannot use these actions on a share repository: sparserevlog
41 abort: cannot use these actions on a share repository: sparserevlog
42 (upgrade the main repository directly)
42 (upgrade the main repository directly)
43 [255]
43 [255]
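
Since a format change such as sparserevlog has to be applied to the share source, a script can resolve a share to its source before upgrading. This is a sketch only: `upgrade_target` is a hypothetical name, and it assumes the conventional `.hg/sharedpath` pointer that `hg share` writes (an absolute path to the source repository's `.hg` directory).

  import os

  def upgrade_target(repo_path):
      """Return the repository that should actually receive the upgrade."""
      sharedpath = os.path.join(repo_path, '.hg', 'sharedpath')
      if os.path.exists(sharedpath):
          with open(sharedpath) as fh:
              source_hg = fh.read().strip()
          # sharedpath names the source's .hg directory; its parent is the repo.
          return os.path.dirname(source_hg)
      return repo_path

  print(upgrade_target('share-child'))   # resolves to share-parent
  print(upgrade_target('share-parent'))  # already the source; unchanged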
44
44
45 Unless the action is compatible with share
45 Unless the action is compatible with share
46
46
47 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=yes --quiet
47 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=yes --quiet
48 requirements
48 requirements
49 preserved: * (glob)
49 preserved: * (glob)
50 added: dirstate-v2
50 added: dirstate-v2
51
51
52 no revlogs to process
52 no revlogs to process
53
53
54
54
55 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=yes --quiet --run
55 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=yes --quiet --run
56 upgrade will perform the following actions:
56 upgrade will perform the following actions:
57
57
58 requirements
58 requirements
59 preserved: * (glob)
59 preserved: * (glob)
60 added: dirstate-v2
60 added: dirstate-v2
61
61
62 no revlogs to process
62 no revlogs to process
63
63
64 $ hg debugformat -R share-child | grep dirstate-v2
64 $ hg debugformat -R share-child | grep dirstate-v2
65 dirstate-v2: yes
65 dirstate-v2: yes
66 $ hg debugformat -R share-parent | grep dirstate-v2
66 $ hg debugformat -R share-parent | grep dirstate-v2
67 dirstate-v2: no
67 dirstate-v2: no
68 $ hg status --all -R share-child
68 $ hg status --all -R share-child
69 C nf0
69 C nf0
70 C nf1
70 C nf1
71 C nf2
71 C nf2
72 C nf3
72 C nf3
73 C nf4
73 C nf4
74 C nf5
74 C nf5
75 C nf6
75 C nf6
76 C nf7
76 C nf7
77 C nf8
77 C nf8
78 C nf9
78 C nf9
79 $ hg log -l 3 -R share-child
79 $ hg log -l 3 -R share-child
80 changeset: 9:0059eb38e4a4
80 changeset: 9:0059eb38e4a4
81 tag: tip
81 tag: tip
82 user: debugbuilddag
82 user: debugbuilddag
83 date: Thu Jan 01 00:00:09 1970 +0000
83 date: Thu Jan 01 00:00:09 1970 +0000
84 summary: r9
84 summary: r9
85
85
86 changeset: 8:4d5be70c8130
86 changeset: 8:4d5be70c8130
87 user: debugbuilddag
87 user: debugbuilddag
88 date: Thu Jan 01 00:00:08 1970 +0000
88 date: Thu Jan 01 00:00:08 1970 +0000
89 summary: r8
89 summary: r8
90
90
91 changeset: 7:e60bfe72517e
91 changeset: 7:e60bfe72517e
92 user: debugbuilddag
92 user: debugbuilddag
93 date: Thu Jan 01 00:00:07 1970 +0000
93 date: Thu Jan 01 00:00:07 1970 +0000
94 summary: r7
94 summary: r7
95
95
96 $ hg status --all -R share-parent
96 $ hg status --all -R share-parent
97 C nf0
97 C nf0
98 C nf1
98 C nf1
99 C nf2
99 C nf2
100 C nf3
100 C nf3
101 C nf4
101 C nf4
102 C nf5
102 C nf5
103 C nf6
103 C nf6
104 C nf7
104 C nf7
105 C nf8
105 C nf8
106 C nf9
106 C nf9
107 $ hg log -l 3 -R share-parent
107 $ hg log -l 3 -R share-parent
108 changeset: 9:0059eb38e4a4
108 changeset: 9:0059eb38e4a4
109 tag: tip
109 tag: tip
110 user: debugbuilddag
110 user: debugbuilddag
111 date: Thu Jan 01 00:00:09 1970 +0000
111 date: Thu Jan 01 00:00:09 1970 +0000
112 summary: r9
112 summary: r9
113
113
114 changeset: 8:4d5be70c8130
114 changeset: 8:4d5be70c8130
115 user: debugbuilddag
115 user: debugbuilddag
116 date: Thu Jan 01 00:00:08 1970 +0000
116 date: Thu Jan 01 00:00:08 1970 +0000
117 summary: r8
117 summary: r8
118
118
119 changeset: 7:e60bfe72517e
119 changeset: 7:e60bfe72517e
120 user: debugbuilddag
120 user: debugbuilddag
121 date: Thu Jan 01 00:00:07 1970 +0000
121 date: Thu Jan 01 00:00:07 1970 +0000
122 summary: r7
122 summary: r7
123
123
124
124
125 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=no --quiet --run
125 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=no --quiet --run
126 upgrade will perform the following actions:
126 upgrade will perform the following actions:
127
127
128 requirements
128 requirements
129 preserved: * (glob)
129 preserved: * (glob)
130 removed: dirstate-v2
130 removed: dirstate-v2
131
131
132 no revlogs to process
132 no revlogs to process
133
133
134 $ hg debugformat -R share-child | grep dirstate-v2
134 $ hg debugformat -R share-child | grep dirstate-v2
135 dirstate-v2: no
135 dirstate-v2: no
136 $ hg debugformat -R share-parent | grep dirstate-v2
136 $ hg debugformat -R share-parent | grep dirstate-v2
137 dirstate-v2: no
137 dirstate-v2: no
138 $ hg status --all -R share-child
138 $ hg status --all -R share-child
139 C nf0
139 C nf0
140 C nf1
140 C nf1
141 C nf2
141 C nf2
142 C nf3
142 C nf3
143 C nf4
143 C nf4
144 C nf5
144 C nf5
145 C nf6
145 C nf6
146 C nf7
146 C nf7
147 C nf8
147 C nf8
148 C nf9
148 C nf9
149 $ hg log -l 3 -R share-child
149 $ hg log -l 3 -R share-child
150 changeset: 9:0059eb38e4a4
150 changeset: 9:0059eb38e4a4
151 tag: tip
151 tag: tip
152 user: debugbuilddag
152 user: debugbuilddag
153 date: Thu Jan 01 00:00:09 1970 +0000
153 date: Thu Jan 01 00:00:09 1970 +0000
154 summary: r9
154 summary: r9
155
155
156 changeset: 8:4d5be70c8130
156 changeset: 8:4d5be70c8130
157 user: debugbuilddag
157 user: debugbuilddag
158 date: Thu Jan 01 00:00:08 1970 +0000
158 date: Thu Jan 01 00:00:08 1970 +0000
159 summary: r8
159 summary: r8
160
160
161 changeset: 7:e60bfe72517e
161 changeset: 7:e60bfe72517e
162 user: debugbuilddag
162 user: debugbuilddag
163 date: Thu Jan 01 00:00:07 1970 +0000
163 date: Thu Jan 01 00:00:07 1970 +0000
164 summary: r7
164 summary: r7
165
165
166 $ hg status --all -R share-parent
166 $ hg status --all -R share-parent
167 C nf0
167 C nf0
168 C nf1
168 C nf1
169 C nf2
169 C nf2
170 C nf3
170 C nf3
171 C nf4
171 C nf4
172 C nf5
172 C nf5
173 C nf6
173 C nf6
174 C nf7
174 C nf7
175 C nf8
175 C nf8
176 C nf9
176 C nf9
177 $ hg log -l 3 -R share-parent
177 $ hg log -l 3 -R share-parent
178 changeset: 9:0059eb38e4a4
178 changeset: 9:0059eb38e4a4
179 tag: tip
179 tag: tip
180 user: debugbuilddag
180 user: debugbuilddag
181 date: Thu Jan 01 00:00:09 1970 +0000
181 date: Thu Jan 01 00:00:09 1970 +0000
182 summary: r9
182 summary: r9
183
183
184 changeset: 8:4d5be70c8130
184 changeset: 8:4d5be70c8130
185 user: debugbuilddag
185 user: debugbuilddag
186 date: Thu Jan 01 00:00:08 1970 +0000
186 date: Thu Jan 01 00:00:08 1970 +0000
187 summary: r8
187 summary: r8
188
188
189 changeset: 7:e60bfe72517e
189 changeset: 7:e60bfe72517e
190 user: debugbuilddag
190 user: debugbuilddag
191 date: Thu Jan 01 00:00:07 1970 +0000
191 date: Thu Jan 01 00:00:07 1970 +0000
192 summary: r7
192 summary: r7
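
The round trip above only flips dirstate-v2 for the share itself, which the `hg debugformat ... | grep dirstate-v2` checks confirm. The same check can be scripted; `uses_dirstate_v2` is a hypothetical helper and the sketch assumes an `hg` executable on PATH.

  import subprocess

  def uses_dirstate_v2(repo):
      """Mirror the `hg debugformat -R <repo> | grep dirstate-v2` check above."""
      out = subprocess.run(
          ['hg', 'debugformat', '-R', repo],
          check=True, capture_output=True, text=True,
      ).stdout
      for line in out.splitlines():
          if line.startswith('dirstate-v2:'):
              return line.split(':', 1)[1].strip() == 'yes'
      return False

  print(uses_dirstate_v2('share-child'), uses_dirstate_v2('share-parent'))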
193
193
194
194
195 Do not yet support upgrading treemanifest repos
195 Do not yet support upgrading treemanifest repos
196
196
197 $ hg --config experimental.treemanifest=true init treemanifest
197 $ hg --config experimental.treemanifest=true init treemanifest
198 $ hg -R treemanifest debugupgraderepo
198 $ hg -R treemanifest debugupgraderepo
199 abort: cannot upgrade repository; unsupported source requirement: treemanifest
199 abort: cannot upgrade repository; unsupported source requirement: treemanifest
200 [255]
200 [255]
201
201
202 Cannot add treemanifest requirement during upgrade
202 Cannot add treemanifest requirement during upgrade
203
203
204 $ hg init disallowaddedreq
204 $ hg init disallowaddedreq
205 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
205 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
206 abort: cannot upgrade repository; do not support adding requirement: treemanifest
206 abort: cannot upgrade repository; do not support adding requirement: treemanifest
207 [255]
207 [255]
208
208
209 An upgrade of a repository created with recommended settings only suggests optimizations
209 An upgrade of a repository created with recommended settings only suggests optimizations
210
210
211 $ hg init empty
211 $ hg init empty
212 $ cd empty
212 $ cd empty
213 $ hg debugformat
213 $ hg debugformat
214 format-variant repo
214 format-variant repo
215 fncache: yes
215 fncache: yes
216 dirstate-v2: no
216 dirstate-v2: no
217 tracked-hint: no
217 tracked-hint: no
218 dotencode: yes
218 dotencode: yes
219 generaldelta: yes
219 generaldelta: yes
220 share-safe: yes
220 share-safe: yes
221 sparserevlog: yes
221 sparserevlog: yes
222 persistent-nodemap: no (no-rust !)
222 persistent-nodemap: no (no-rust !)
223 persistent-nodemap: yes (rust !)
223 persistent-nodemap: yes (rust !)
224 copies-sdc: no
224 copies-sdc: no
225 revlog-v2: no
225 revlog-v2: no
226 changelog-v2: no
226 changelog-v2: no
227 plain-cl-delta: yes
227 plain-cl-delta: yes
228 compression: zlib
228 compression: zlib
229 compression-level: default
229 compression-level: default
230 $ hg debugformat --verbose
230 $ hg debugformat --verbose
231 format-variant repo config default
231 format-variant repo config default
232 fncache: yes yes yes
232 fncache: yes yes yes
233 dirstate-v2: no no no
233 dirstate-v2: no no no
234 tracked-hint: no no no
234 tracked-hint: no no no
235 dotencode: yes yes yes
235 dotencode: yes yes yes
236 generaldelta: yes yes yes
236 generaldelta: yes yes yes
237 share-safe: yes yes yes
237 share-safe: yes yes yes
238 sparserevlog: yes yes yes
238 sparserevlog: yes yes yes
239 persistent-nodemap: no no no (no-rust !)
239 persistent-nodemap: no no no (no-rust !)
240 persistent-nodemap: yes yes no (rust !)
240 persistent-nodemap: yes yes no (rust !)
241 copies-sdc: no no no
241 copies-sdc: no no no
242 revlog-v2: no no no
242 revlog-v2: no no no
243 changelog-v2: no no no
243 changelog-v2: no no no
244 plain-cl-delta: yes yes yes
244 plain-cl-delta: yes yes yes
245 compression: zlib zlib zlib (no-zstd !)
245 compression: zlib zlib zlib (no-zstd !)
246 compression: zlib zlib zstd (zstd !)
246 compression: zlib zlib zstd (zstd !)
247 compression-level: default default default
247 compression-level: default default default
248 $ hg debugformat --verbose --config format.usefncache=no
248 $ hg debugformat --verbose --config format.usefncache=no
249 format-variant repo config default
249 format-variant repo config default
250 fncache: yes no yes
250 fncache: yes no yes
251 dirstate-v2: no no no
251 dirstate-v2: no no no
252 tracked-hint: no no no
252 tracked-hint: no no no
253 dotencode: yes no yes
253 dotencode: yes no yes
254 generaldelta: yes yes yes
254 generaldelta: yes yes yes
255 share-safe: yes yes yes
255 share-safe: yes yes yes
256 sparserevlog: yes yes yes
256 sparserevlog: yes yes yes
257 persistent-nodemap: no no no (no-rust !)
257 persistent-nodemap: no no no (no-rust !)
258 persistent-nodemap: yes yes no (rust !)
258 persistent-nodemap: yes yes no (rust !)
259 copies-sdc: no no no
259 copies-sdc: no no no
260 revlog-v2: no no no
260 revlog-v2: no no no
261 changelog-v2: no no no
261 changelog-v2: no no no
262 plain-cl-delta: yes yes yes
262 plain-cl-delta: yes yes yes
263 compression: zlib zlib zlib (no-zstd !)
263 compression: zlib zlib zlib (no-zstd !)
264 compression: zlib zlib zstd (zstd !)
264 compression: zlib zlib zstd (zstd !)
265 compression-level: default default default
265 compression-level: default default default
266 $ hg debugformat --verbose --config format.usefncache=no --color=debug
266 $ hg debugformat --verbose --config format.usefncache=no --color=debug
267 format-variant repo config default
267 format-variant repo config default
268 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
268 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
269 [formatvariant.name.uptodate|dirstate-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
269 [formatvariant.name.uptodate|dirstate-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
270 [formatvariant.name.uptodate|tracked-hint: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
270 [formatvariant.name.uptodate|tracked-hint: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
271 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
271 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
272 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
272 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
273 [formatvariant.name.uptodate|share-safe: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
273 [formatvariant.name.uptodate|share-safe: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
274 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
274 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
275 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no] (no-rust !)
275 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no] (no-rust !)
276 [formatvariant.name.mismatchdefault|persistent-nodemap:][formatvariant.repo.mismatchdefault| yes][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
276 [formatvariant.name.mismatchdefault|persistent-nodemap:][formatvariant.repo.mismatchdefault| yes][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
277 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
277 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
278 [formatvariant.name.uptodate|revlog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
278 [formatvariant.name.uptodate|revlog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
279 [formatvariant.name.uptodate|changelog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
279 [formatvariant.name.uptodate|changelog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
280 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
280 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
281 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] (no-zstd !)
281 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] (no-zstd !)
282 [formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
282 [formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
283 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
283 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
284 $ hg debugformat -Tjson
284 $ hg debugformat -Tjson
285 [
285 [
286 {
286 {
287 "config": true,
287 "config": true,
288 "default": true,
288 "default": true,
289 "name": "fncache",
289 "name": "fncache",
290 "repo": true
290 "repo": true
291 },
291 },
292 {
292 {
293 "config": false,
293 "config": false,
294 "default": false,
294 "default": false,
295 "name": "dirstate-v2",
295 "name": "dirstate-v2",
296 "repo": false
296 "repo": false
297 },
297 },
298 {
298 {
299 "config": false,
299 "config": false,
300 "default": false,
300 "default": false,
301 "name": "tracked-hint",
301 "name": "tracked-hint",
302 "repo": false
302 "repo": false
303 },
303 },
304 {
304 {
305 "config": true,
305 "config": true,
306 "default": true,
306 "default": true,
307 "name": "dotencode",
307 "name": "dotencode",
308 "repo": true
308 "repo": true
309 },
309 },
310 {
310 {
311 "config": true,
311 "config": true,
312 "default": true,
312 "default": true,
313 "name": "generaldelta",
313 "name": "generaldelta",
314 "repo": true
314 "repo": true
315 },
315 },
316 {
316 {
317 "config": true,
317 "config": true,
318 "default": true,
318 "default": true,
319 "name": "share-safe",
319 "name": "share-safe",
320 "repo": true
320 "repo": true
321 },
321 },
322 {
322 {
323 "config": true,
323 "config": true,
324 "default": true,
324 "default": true,
325 "name": "sparserevlog",
325 "name": "sparserevlog",
326 "repo": true
326 "repo": true
327 },
327 },
328 {
328 {
329 "config": false, (no-rust !)
329 "config": false, (no-rust !)
330 "config": true, (rust !)
330 "config": true, (rust !)
331 "default": false,
331 "default": false,
332 "name": "persistent-nodemap",
332 "name": "persistent-nodemap",
333 "repo": false (no-rust !)
333 "repo": false (no-rust !)
334 "repo": true (rust !)
334 "repo": true (rust !)
335 },
335 },
336 {
336 {
337 "config": false,
337 "config": false,
338 "default": false,
338 "default": false,
339 "name": "copies-sdc",
339 "name": "copies-sdc",
340 "repo": false
340 "repo": false
341 },
341 },
342 {
342 {
343 "config": false,
343 "config": false,
344 "default": false,
344 "default": false,
345 "name": "revlog-v2",
345 "name": "revlog-v2",
346 "repo": false
346 "repo": false
347 },
347 },
348 {
348 {
349 "config": false,
349 "config": false,
350 "default": false,
350 "default": false,
351 "name": "changelog-v2",
351 "name": "changelog-v2",
352 "repo": false
352 "repo": false
353 },
353 },
354 {
354 {
355 "config": true,
355 "config": true,
356 "default": true,
356 "default": true,
357 "name": "plain-cl-delta",
357 "name": "plain-cl-delta",
358 "repo": true
358 "repo": true
359 },
359 },
360 {
360 {
361 "config": "zlib",
361 "config": "zlib",
362 "default": "zlib", (no-zstd !)
362 "default": "zlib", (no-zstd !)
363 "default": "zstd", (zstd !)
363 "default": "zstd", (zstd !)
364 "name": "compression",
364 "name": "compression",
365 "repo": "zlib"
365 "repo": "zlib"
366 },
366 },
367 {
367 {
368 "config": "default",
368 "config": "default",
369 "default": "default",
369 "default": "default",
370 "name": "compression-level",
370 "name": "compression-level",
371 "repo": "default"
371 "repo": "default"
372 }
372 }
373 ]
373 ]
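
The JSON form above is the easiest one to consume from a script. As a sketch (hypothetical `stale_format_variants` name, `hg` assumed on PATH), the snippet below lists the variants whose repository value differs from the current config, which is roughly the set the `hg debugupgraderepo` call that follows would propose to change; for this freshly created repository the list is empty, matching "(no format upgrades found in existing repository)".

  import json
  import subprocess

  def stale_format_variants(repo='.'):
      """Names of format variants where the repo disagrees with the config."""
      out = subprocess.run(
          ['hg', 'debugformat', '-R', repo, '-Tjson'],
          check=True, capture_output=True, text=True,
      ).stdout
      return [e['name'] for e in json.loads(out) if e['repo'] != e['config']]

  print(stale_format_variants('.'))  # [] for the 'empty' repository above
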
374 $ hg debugupgraderepo
374 $ hg debugupgraderepo
375 (no format upgrades found in existing repository)
375 (no format upgrades found in existing repository)
376 performing an upgrade with "--run" will make the following changes:
376 performing an upgrade with "--run" will make the following changes:
377
377
378 requirements
378 requirements
379 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
379 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
380 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
380 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
381
381
382 no revlogs to process
382 no revlogs to process
383
383
384 additional optimizations are available by specifying "--optimize <name>":
384 additional optimizations are available by specifying "--optimize <name>":
385
385
386 re-delta-parent
386 re-delta-parent
387 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
387 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
388
388
389 re-delta-multibase
389 re-delta-multibase
390 deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges
390 deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges
391
391
392 re-delta-all
392 re-delta-all
393 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
393 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
394
394
395 re-delta-fulladd
395 re-delta-fulladd
396 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
396 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
397
397
398
398
399 $ hg debugupgraderepo --quiet
399 $ hg debugupgraderepo --quiet
400 requirements
400 requirements
401 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
401 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
402 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
402 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
403
403
404 no revlogs to process
404 no revlogs to process
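
The `--quiet` report keeps a stable shape (a requirements section with preserved/added/removed lines, then the processed revlogs), which makes it easy to post-process. The parser below is only a sketch under that assumption; `upgrade_summary` is a hypothetical name.

  import subprocess

  def upgrade_summary(repo='.'):
      """Parse `hg debugupgraderepo --quiet` into requirement buckets."""
      out = subprocess.run(
          ['hg', 'debugupgraderepo', '-R', repo, '--quiet'],
          check=True, capture_output=True, text=True,
      ).stdout
      summary = {}
      for line in out.splitlines():
          line = line.strip()
          for key in ('preserved', 'added', 'removed'):
              if line.startswith(key + ':'):
                  summary[key] = [r.strip() for r in line.split(':', 1)[1].split(',')]
      return summary

  print(upgrade_summary('.'))  # only a 'preserved' bucket for this repository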
405
405
406
406
407 --optimize can be used to add optimizations
407 --optimize can be used to add optimizations
408
408
409 $ hg debugupgrade --optimize 're-delta-parent'
409 $ hg debugupgrade --optimize 're-delta-parent'
410 (no format upgrades found in existing repository)
410 (no format upgrades found in existing repository)
411 performing an upgrade with "--run" will make the following changes:
411 performing an upgrade with "--run" will make the following changes:
412
412
413 requirements
413 requirements
414 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
414 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
415 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
415 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
416
416
417 optimisations: re-delta-parent
417 optimisations: re-delta-parent
418
418
419 re-delta-parent
419 re-delta-parent
420 deltas within internal storage will choose a new base revision if needed
420 deltas within internal storage will choose a new base revision if needed
421
421
422 processed revlogs:
422 processed revlogs:
423 - all-filelogs
423 - all-filelogs
424 - changelog
424 - changelog
425 - manifest
425 - manifest
426
426
427 additional optimizations are available by specifying "--optimize <name>":
427 additional optimizations are available by specifying "--optimize <name>":
428
428
429 re-delta-multibase
429 re-delta-multibase
430 deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges
430 deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges
431
431
432 re-delta-all
432 re-delta-all
433 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
433 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
434
434
435 re-delta-fulladd
435 re-delta-fulladd
436 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
436 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
437
437
438
438
439 modern form of the option
439 modern form of the option
440
440
441 $ hg debugupgrade --optimize re-delta-parent
441 $ hg debugupgrade --optimize re-delta-parent
442 (no format upgrades found in existing repository)
442 (no format upgrades found in existing repository)
443 performing an upgrade with "--run" will make the following changes:
443 performing an upgrade with "--run" will make the following changes:
444
444
445 requirements
445 requirements
446 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
446 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
447 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
447 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
448
448
449 optimisations: re-delta-parent
449 optimisations: re-delta-parent
450
450
451 re-delta-parent
451 re-delta-parent
452 deltas within internal storage will choose a new base revision if needed
452 deltas within internal storage will choose a new base revision if needed
453
453
454 processed revlogs:
454 processed revlogs:
455 - all-filelogs
455 - all-filelogs
456 - changelog
456 - changelog
457 - manifest
457 - manifest
458
458
459 additional optimizations are available by specifying "--optimize <name>":
459 additional optimizations are available by specifying "--optimize <name>":
460
460
461 re-delta-multibase
461 re-delta-multibase
462 deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges
462 deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges
463
463
464 re-delta-all
464 re-delta-all
465 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
465 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
466
466
467 re-delta-fulladd
467 re-delta-fulladd
468 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
468 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
469
469
470 $ hg debugupgrade --optimize re-delta-parent --quiet
470 $ hg debugupgrade --optimize re-delta-parent --quiet
471 requirements
471 requirements
472 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
472 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
473 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
473 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
474
474
475 optimisations: re-delta-parent
475 optimisations: re-delta-parent
476
476
477 processed revlogs:
477 processed revlogs:
478 - all-filelogs
478 - all-filelogs
479 - changelog
479 - changelog
480 - manifest
480 - manifest
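
To apply the optimization instead of just previewing it, the same command takes `--run`, exactly as the dirstate-v2 examples earlier did. The wrapper below is a sketch only (`run_redelta_parent` is a hypothetical name, `hg` assumed on PATH); the usage line is left commented out because the command rewrites the store in place.

  import subprocess

  def run_redelta_parent(repo='.'):
      """Apply the re-delta-parent optimization previewed above.

      --quiet trims the output to the summary shown above; --run performs
      the upgrade rather than only printing the plan.
      """
      subprocess.run(
          ['hg', 'debugupgraderepo', '-R', repo,
           '--optimize', 're-delta-parent', '--quiet', '--run'],
          check=True,
      )

  # run_redelta_parent('.')  # uncomment to actually rewrite the revlogs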
481
481
482
482
483 unknown optimization:
483 unknown optimization:
484
484
485 $ hg debugupgrade --optimize foobar
485 $ hg debugupgrade --optimize foobar
486 abort: unknown optimization action requested: foobar
486 abort: unknown optimization action requested: foobar
487 (run without arguments to see valid optimizations)
487 (run without arguments to see valid optimizations)
488 [255]
488 [255]
489
489
490 Various sub-optimal detections work
490 Various sub-optimal detections work
491
491
492 $ cat > .hg/requires << EOF
492 $ cat > .hg/requires << EOF
493 > revlogv1
493 > revlogv1
494 > store
494 > store
495 > EOF
495 > EOF
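
Writing a bare revlogv1/store requires file, as above, is what makes the detections below fire. The sketch after this paragraph reproduces that reasoning outside of hg by diffing the file against a hard-coded recommended set; the set is copied from the report shown below rather than queried from Mercurial, and `missing_recommended` is a hypothetical name.

  # Copied from the "repository lacks features" report below; not queried from hg.
  RECOMMENDED = {'dotencode', 'fncache', 'generaldelta', 'share-safe', 'sparserevlog'}

  def missing_recommended(requires_path='.hg/requires'):
      """Recommended format features absent from a requires file."""
      with open(requires_path) as fh:
          present = {line.strip() for line in fh if line.strip()}
      return sorted(RECOMMENDED - present)

  print(missing_recommended())  # every feature named in the report that follows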
496
496
497 $ hg debugformat
497 $ hg debugformat
498 format-variant repo
498 format-variant repo
499 fncache: no
499 fncache: no
500 dirstate-v2: no
500 dirstate-v2: no
501 tracked-hint: no
501 tracked-hint: no
502 dotencode: no
502 dotencode: no
503 generaldelta: no
503 generaldelta: no
504 share-safe: no
504 share-safe: no
505 sparserevlog: no
505 sparserevlog: no
506 persistent-nodemap: no
506 persistent-nodemap: no
507 copies-sdc: no
507 copies-sdc: no
508 revlog-v2: no
508 revlog-v2: no
509 changelog-v2: no
509 changelog-v2: no
510 plain-cl-delta: yes
510 plain-cl-delta: yes
511 compression: zlib
511 compression: zlib
512 compression-level: default
512 compression-level: default
513 $ hg debugformat --verbose
513 $ hg debugformat --verbose
514 format-variant repo config default
514 format-variant repo config default
515 fncache: no yes yes
515 fncache: no yes yes
516 dirstate-v2: no no no
516 dirstate-v2: no no no
517 tracked-hint: no no no
517 tracked-hint: no no no
518 dotencode: no yes yes
518 dotencode: no yes yes
519 generaldelta: no yes yes
519 generaldelta: no yes yes
520 share-safe: no yes yes
520 share-safe: no yes yes
521 sparserevlog: no yes yes
521 sparserevlog: no yes yes
522 persistent-nodemap: no no no (no-rust !)
522 persistent-nodemap: no no no (no-rust !)
523 persistent-nodemap: no yes no (rust !)
523 persistent-nodemap: no yes no (rust !)
524 copies-sdc: no no no
524 copies-sdc: no no no
525 revlog-v2: no no no
525 revlog-v2: no no no
526 changelog-v2: no no no
526 changelog-v2: no no no
527 plain-cl-delta: yes yes yes
527 plain-cl-delta: yes yes yes
528 compression: zlib zlib zlib (no-zstd !)
528 compression: zlib zlib zlib (no-zstd !)
529 compression: zlib zlib zstd (zstd !)
529 compression: zlib zlib zstd (zstd !)
530 compression-level: default default default
530 compression-level: default default default
531 $ hg debugformat --verbose --config format.usegeneraldelta=no
531 $ hg debugformat --verbose --config format.usegeneraldelta=no
532 format-variant repo config default
532 format-variant repo config default
533 fncache: no yes yes
533 fncache: no yes yes
534 dirstate-v2: no no no
534 dirstate-v2: no no no
535 tracked-hint: no no no
535 tracked-hint: no no no
536 dotencode: no yes yes
536 dotencode: no yes yes
537 generaldelta: no no yes
537 generaldelta: no no yes
538 share-safe: no yes yes
538 share-safe: no yes yes
539 sparserevlog: no no yes
539 sparserevlog: no no yes
540 persistent-nodemap: no no no (no-rust !)
540 persistent-nodemap: no no no (no-rust !)
541 persistent-nodemap: no yes no (rust !)
541 persistent-nodemap: no yes no (rust !)
542 copies-sdc: no no no
542 copies-sdc: no no no
543 revlog-v2: no no no
543 revlog-v2: no no no
544 changelog-v2: no no no
544 changelog-v2: no no no
545 plain-cl-delta: yes yes yes
545 plain-cl-delta: yes yes yes
546 compression: zlib zlib zlib (no-zstd !)
546 compression: zlib zlib zlib (no-zstd !)
547 compression: zlib zlib zstd (zstd !)
547 compression: zlib zlib zstd (zstd !)
548 compression-level: default default default
548 compression-level: default default default
549 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
549 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
550 format-variant repo config default
550 format-variant repo config default
551 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
551 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
552 [formatvariant.name.uptodate|dirstate-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
552 [formatvariant.name.uptodate|dirstate-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
553 [formatvariant.name.uptodate|tracked-hint: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
553 [formatvariant.name.uptodate|tracked-hint: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
554 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
554 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
555 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
555 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
556 [formatvariant.name.mismatchconfig|share-safe: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
556 [formatvariant.name.mismatchconfig|share-safe: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
557 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
557 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
558 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no] (no-rust !)
558 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no] (no-rust !)
559 [formatvariant.name.mismatchconfig|persistent-nodemap:][formatvariant.repo.mismatchconfig| no][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
559 [formatvariant.name.mismatchconfig|persistent-nodemap:][formatvariant.repo.mismatchconfig| no][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
560 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
560 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
561 [formatvariant.name.uptodate|revlog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
561 [formatvariant.name.uptodate|revlog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
562 [formatvariant.name.uptodate|changelog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
562 [formatvariant.name.uptodate|changelog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
563 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
563 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
564 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] (no-zstd !)
564 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] (no-zstd !)
565 [formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
565 [formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
566 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
566 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
567 $ hg debugupgraderepo
567 $ hg debugupgraderepo
568 note: selecting all-filelogs for processing to change: dotencode
568 note: selecting all-filelogs for processing to change: dotencode
569 note: selecting all-manifestlogs for processing to change: dotencode
569 note: selecting all-manifestlogs for processing to change: dotencode
570 note: selecting changelog for processing to change: dotencode
570 note: selecting changelog for processing to change: dotencode
571
571
572 repository lacks features recommended by current config options:
572 repository lacks features recommended by current config options:
573
573
574 fncache
574 fncache
575 long and reserved filenames may not work correctly; repository performance is sub-optimal
575 long and reserved filenames may not work correctly; repository performance is sub-optimal
576
576
577 dotencode
577 dotencode
578 storage of filenames beginning with a period or space may not work correctly
578 storage of filenames beginning with a period or space may not work correctly
579
579
580 generaldelta
580 generaldelta
581 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
581 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
582
582
583 share-safe
583 share-safe
584 old shared repositories do not share source repository requirements and config. This leads to various problems when the source repository format is upgraded or some new extensions are enabled.
584 old shared repositories do not share source repository requirements and config. This leads to various problems when the source repository format is upgraded or some new extensions are enabled.
585
585
586 sparserevlog
586 sparserevlog
587 in order to limit disk reading and memory usage on older versions, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severely limit Mercurial's ability to build good delta chains, resulting in much more storage space being taken and limiting the reusability of on-disk deltas during exchange.
587 in order to limit disk reading and memory usage on older versions, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severely limit Mercurial's ability to build good delta chains, resulting in much more storage space being taken and limiting the reusability of on-disk deltas during exchange.
588
588
589 persistent-nodemap (rust !)
589 persistent-nodemap (rust !)
590 persist the node -> rev mapping on disk to speed up lookup (rust !)
590 persist the node -> rev mapping on disk to speed up lookup (rust !)
591 (rust !)
591 (rust !)
592
592
593 performing an upgrade with "--run" will make the following changes:
593 performing an upgrade with "--run" will make the following changes:
594
594
595 requirements
595 requirements
596 preserved: revlogv1, store
596 preserved: revlogv1, store
597 added: dotencode, fncache, generaldelta, share-safe, sparserevlog (no-rust !)
597 added: dotencode, fncache, generaldelta, share-safe, sparserevlog (no-rust !)
598 added: dotencode, fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
598 added: dotencode, fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
599
599
600 fncache
600 fncache
601 repository will be more resilient to storing certain paths and performance of certain operations should be improved
601 repository will be more resilient to storing certain paths and performance of certain operations should be improved
602
602
603 dotencode
603 dotencode
604 repository will be better able to store files beginning with a space or period
604 repository will be better able to store files beginning with a space or period
605
605
606 generaldelta
606 generaldelta
607 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
607 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
608
608
609 share-safe
609 share-safe
610 Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
610 Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
611
611
612 sparserevlog
612 sparserevlog
613 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
613 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
614
614
615 persistent-nodemap (rust !)
615 persistent-nodemap (rust !)
616 Speed up revision lookup by node id. (rust !)
616 Speed up revision lookup by node id. (rust !)
617 (rust !)
617 (rust !)
618 processed revlogs:
618 processed revlogs:
619 - all-filelogs
619 - all-filelogs
620 - changelog
620 - changelog
621 - manifest
621 - manifest
622
622
623 additional optimizations are available by specifying "--optimize <name>":
623 additional optimizations are available by specifying "--optimize <name>":
624
624
625 re-delta-parent
625 re-delta-parent
626 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
626 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
627
627
628 re-delta-multibase
628 re-delta-multibase
629 deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges
629 deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges
630
630
631 re-delta-all
631 re-delta-all
632 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
632 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
633
633
634 re-delta-fulladd
634 re-delta-fulladd
635 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
635 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
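
The sparserevlog wording above contrasts the physical span of a delta chain on disk with the bytes that actually belong to the chain. The toy numbers below are made up purely to make that distinction concrete; they do not come from any real revlog.

  # Hypothetical (offset, size) pairs for the on-disk chunks of one delta chain.
  chain = [(0, 40), (500, 30), (2000, 25), (9000, 20)]

  payload = sum(size for _, size in chain)              # bytes that are chain data
  span = (chain[-1][0] + chain[-1][1]) - chain[0][0]    # bytes a naive read covers

  # Without sparse reading the whole span is read to rebuild the last revision,
  # even though only `payload` bytes matter; sparserevlog skips the gaps instead,
  # so longer chains stay cheap to read.
  print('payload bytes:', payload)   # 115
  print('on-disk span :', span)      # 9020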
636
636
637 $ hg debugupgraderepo --quiet
637 $ hg debugupgraderepo --quiet
638 requirements
638 requirements
639 preserved: revlogv1, store
639 preserved: revlogv1, store
640 added: dotencode, fncache, generaldelta, share-safe, sparserevlog (no-rust !)
640 added: dotencode, fncache, generaldelta, share-safe, sparserevlog (no-rust !)
641 added: dotencode, fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
641 added: dotencode, fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
642
642
643 processed revlogs:
643 processed revlogs:
644 - all-filelogs
644 - all-filelogs
645 - changelog
645 - changelog
646 - manifest
646 - manifest
647
647
648
648
649 $ hg --config format.dotencode=false debugupgraderepo
649 $ hg --config format.dotencode=false debugupgraderepo
650 note: selecting all-filelogs for processing to change: fncache
650 note: selecting all-filelogs for processing to change: fncache
651 note: selecting all-manifestlogs for processing to change: fncache
651 note: selecting all-manifestlogs for processing to change: fncache
652 note: selecting changelog for processing to change: fncache
652 note: selecting changelog for processing to change: fncache
653
653
654 repository lacks features recommended by current config options:
654 repository lacks features recommended by current config options:
655
655
656 fncache
656 fncache
657 long and reserved filenames may not work correctly; repository performance is sub-optimal
657 long and reserved filenames may not work correctly; repository performance is sub-optimal
658
658
659 generaldelta
659 generaldelta
660 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
660 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
661
661
662 share-safe
662 share-safe
663 old shared repositories do not share source repository requirements and config. This leads to various problems when the source repository format is upgraded or some new extensions are enabled.
663 old shared repositories do not share source repository requirements and config. This leads to various problems when the source repository format is upgraded or some new extensions are enabled.
664
664
665 sparserevlog
665 sparserevlog
666 in order to limit disk reading and memory usage on older versions, the span of a delta chain from its root to its end is limited, regardless of the relevant data in this span. This can severely limit Mercurial's ability to build good delta chains, resulting in much more storage space being used and limiting the reusability of on-disk deltas during exchange.
666 in order to limit disk reading and memory usage on older versions, the span of a delta chain from its root to its end is limited, regardless of the relevant data in this span. This can severely limit Mercurial's ability to build good delta chains, resulting in much more storage space being used and limiting the reusability of on-disk deltas during exchange.
667
667
668 persistent-nodemap (rust !)
668 persistent-nodemap (rust !)
669 persist the node -> rev mapping on disk to speed up lookup (rust !)
669 persist the node -> rev mapping on disk to speed up lookup (rust !)
670 (rust !)
670 (rust !)
671 repository lacks features used by the default config options:
671 repository lacks features used by the default config options:
672
672
673 dotencode
673 dotencode
674 storage of filenames beginning with a period or space may not work correctly
674 storage of filenames beginning with a period or space may not work correctly
675
675
676
676
677 performing an upgrade with "--run" will make the following changes:
677 performing an upgrade with "--run" will make the following changes:
678
678
679 requirements
679 requirements
680 preserved: revlogv1, store
680 preserved: revlogv1, store
681 added: fncache, generaldelta, share-safe, sparserevlog (no-rust !)
681 added: fncache, generaldelta, share-safe, sparserevlog (no-rust !)
682 added: fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
682 added: fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
683
683
684 fncache
684 fncache
685 repository will be more resilient to storing certain paths and performance of certain operations should be improved
685 repository will be more resilient to storing certain paths and performance of certain operations should be improved
686
686
687 generaldelta
687 generaldelta
688 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
688 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
689
689
690 share-safe
690 share-safe
691 Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
691 Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
692
692
693 sparserevlog
693 sparserevlog
694 Revlog supports delta chains with more unused data between payloads. These gaps will be skipped at read time. This allows for better delta chains, yielding better compression and faster exchanges with the server.
694 Revlog supports delta chains with more unused data between payloads. These gaps will be skipped at read time. This allows for better delta chains, yielding better compression and faster exchanges with the server.
695
695
696 persistent-nodemap (rust !)
696 persistent-nodemap (rust !)
697 Speed up revision lookup by node id. (rust !)
697 Speed up revision lookup by node id. (rust !)
698 (rust !)
698 (rust !)
699 processed revlogs:
699 processed revlogs:
700 - all-filelogs
700 - all-filelogs
701 - changelog
701 - changelog
702 - manifest
702 - manifest
703
703
704 additional optimizations are available by specifying "--optimize <name>":
704 additional optimizations are available by specifying "--optimize <name>":
705
705
706 re-delta-parent
706 re-delta-parent
707 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
707 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
708
708
709 re-delta-multibase
709 re-delta-multibase
710 deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges
710 deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges
711
711
712 re-delta-all
712 re-delta-all
713 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
713 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
714
714
715 re-delta-fulladd
715 re-delta-fulladd
716 every revision will be re-added as if it were new content. It will go through the full storage mechanism, giving extensions a chance to process it (e.g. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
716 every revision will be re-added as if it were new content. It will go through the full storage mechanism, giving extensions a chance to process it (e.g. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
717
717
718
718
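As a rough mental model for the sparse-revlog gap skipping described in the output above: the chunks needed to rebuild one revision may sit at scattered offsets in the data file, and the reader groups them into dense slices instead of reading the whole span. This is only a sketch of the idea with made-up offsets and an invented slices helper; it is not the slicing algorithm Mercurial actually uses.

  # (offset, length) pairs for the chunks of one hypothetical delta chain.
  chunks = [(0, 100), (120, 80), (5000, 60), (5080, 40)]

  def slices(chunks, max_gap=256):
      # Group chunks into dense reads, starting a new slice whenever the
      # gap of unrelated data between two chunks exceeds max_gap.
      group = [chunks[0]]
      for offset, length in chunks[1:]:
          previous_end = group[-1][0] + group[-1][1]
          if offset - previous_end > max_gap:
              yield group
              group = []
          group.append((offset, length))
      yield group

  for group in slices(chunks):
      start = group[0][0]
      end = group[-1][0] + group[-1][1]
      print("read bytes [%d, %d) for %d chunks" % (start, end, len(group)))
  # read bytes [0, 200) for 2 chunks
  # read bytes [5000, 5120) for 2 chunks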
719 $ cd ..
719 $ cd ..
720
720
721 Upgrading a repository that is already modern is essentially a no-op
721 Upgrading a repository that is already modern is essentially a no-op
722
722
723 $ hg init modern
723 $ hg init modern
724 $ hg -R modern debugupgraderepo --run
724 $ hg -R modern debugupgraderepo --run
725 nothing to do
725 nothing to do
726
726
727 Upgrading a repository to generaldelta works
727 Upgrading a repository to generaldelta works
728
728
729 $ hg --config format.usegeneraldelta=false init upgradegd
729 $ hg --config format.usegeneraldelta=false init upgradegd
730 $ cd upgradegd
730 $ cd upgradegd
731 $ touch f0
731 $ touch f0
732 $ hg -q commit -A -m initial
732 $ hg -q commit -A -m initial
733 $ mkdir FooBarDirectory.d
733 $ mkdir FooBarDirectory.d
734 $ touch FooBarDirectory.d/f1
734 $ touch FooBarDirectory.d/f1
735 $ hg -q commit -A -m 'add f1'
735 $ hg -q commit -A -m 'add f1'
736 $ hg -q up -r 0
736 $ hg -q up -r 0
737 >>> import random
737 >>> import random
738 >>> random.seed(0) # have a reproducible content
738 >>> random.seed(0) # have a reproducible content
739 >>> with open("f2", "wb") as f:
739 >>> with open("f2", "wb") as f:
740 ... for i in range(100000):
740 ... for i in range(100000):
741 ... f.write(b"%d\n" % random.randint(1000000000, 9999999999)) and None
741 ... f.write(b"%d\n" % random.randint(1000000000, 9999999999)) and None
742 $ hg -q commit -A -m 'add f2'
742 $ hg -q commit -A -m 'add f2'
743
743
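The inline Python above writes 100,000 lines, each a 10-digit number plus a newline (11 bytes). That is where the "1.05 MB tracked data" reported by the upgrade runs below comes from: 100,000 x 11 = 1,100,000 bytes, about 1.05 MiB (hg's "MB" here is the 1024^2-based unit). A quick check, for illustration only:

  line_bytes = len(b"%d\n" % 1234567890)      # 10 digits + newline = 11 bytes
  total_bytes = 100000 * line_bytes           # 1,100,000 bytes
  print(total_bytes / (1 << 20))              # ~1.05 (MiB)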
744 make sure we have a .d file
744 make sure we have a .d file
745
745
746 $ ls -d .hg/store/data/*
746 $ ls -d .hg/store/data/*
747 .hg/store/data/_foo_bar_directory.d.hg
747 .hg/store/data/_foo_bar_directory.d.hg
748 .hg/store/data/f0.i
748 .hg/store/data/f0.i
749 .hg/store/data/f2.d
749 .hg/store/data/f2.d
750 .hg/store/data/f2.i
750 .hg/store/data/f2.i
751
751
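The listing above shows FooBarDirectory.d stored as _foo_bar_directory.d.hg. The sketch below reproduces only the rules visible here (uppercase letters become "_" plus the lowercase letter, "_" is doubled, and directory names ending in ".i"/".d"/".hg" get ".hg" appended so they cannot collide with revlog files). encode_store_path is an invented helper, not mercurial.store, which also handles Windows reserved names, hashed long paths, and more.

  def encode_store_path(path):
      # Illustration of the store filename encoding observed above, not
      # the real mercurial.store implementation.
      def encode_component(part, is_dir):
          out = []
          for ch in part:
              if ch == "_":
                  out.append("__")              # "_" is the escape character
              elif ch.isupper():
                  out.append("_" + ch.lower())  # survives case-folding filesystems
              else:
                  out.append(ch)
          encoded = "".join(out)
          if is_dir and encoded.endswith((".i", ".d", ".hg")):
              encoded += ".hg"                  # avoid clashing with revlog files
          return encoded

      parts = path.split("/")
      return "/".join(encode_component(part, i < len(parts) - 1)
                      for i, part in enumerate(parts))

  print(encode_store_path("FooBarDirectory.d/f1.i"))
  # _foo_bar_directory.d.hg/f1.i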
752 $ hg debugupgraderepo --run --config format.sparse-revlog=false
752 $ hg debugupgraderepo --run --config format.sparse-revlog=false
753 note: selecting all-filelogs for processing to change: generaldelta
753 note: selecting all-filelogs for processing to change: generaldelta
754 note: selecting all-manifestlogs for processing to change: generaldelta
754 note: selecting all-manifestlogs for processing to change: generaldelta
755 note: selecting changelog for processing to change: generaldelta
755 note: selecting changelog for processing to change: generaldelta
756
756
757 upgrade will perform the following actions:
757 upgrade will perform the following actions:
758
758
759 requirements
759 requirements
760 preserved: dotencode, fncache, revlogv1, share-safe, store (no-rust !)
760 preserved: dotencode, fncache, revlogv1, share-safe, store (no-rust !)
761 preserved: dotencode, fncache, persistent-nodemap, revlogv1, share-safe, store (rust !)
761 preserved: dotencode, fncache, persistent-nodemap, revlogv1, share-safe, store (rust !)
762 added: generaldelta
762 added: generaldelta
763
763
764 generaldelta
764 generaldelta
765 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
765 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
766
766
767 processed revlogs:
767 processed revlogs:
768 - all-filelogs
768 - all-filelogs
769 - changelog
769 - changelog
770 - manifest
770 - manifest
771
771
772 beginning upgrade...
772 beginning upgrade...
773 repository locked and read-only
773 repository locked and read-only
774 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
774 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
775 (it is safe to interrupt this process any time before data migration completes)
775 (it is safe to interrupt this process any time before data migration completes)
776 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
776 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
777 migrating 519 KB in store; 1.05 MB tracked data
777 migrating 519 KB in store; 1.05 MB tracked data
778 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
778 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
779 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
779 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
780 migrating 1 manifests containing 3 revisions (384 bytes in store; 238 bytes tracked data)
780 migrating 1 manifests containing 3 revisions (384 bytes in store; 238 bytes tracked data)
781 finished migrating 3 manifest revisions across 1 manifests; change in size: -17 bytes
781 finished migrating 3 manifest revisions across 1 manifests; change in size: -17 bytes
782 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
782 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
783 finished migrating 3 changelog revisions; change in size: 0 bytes
783 finished migrating 3 changelog revisions; change in size: 0 bytes
784 finished migrating 9 total revisions; total change in store size: -17 bytes
784 finished migrating 9 total revisions; total change in store size: -17 bytes
785 copying phaseroots
785 copying phaseroots
786 copying requires
786 copying requires
787 data fully upgraded in a temporary repository
787 data fully upgraded in a temporary repository
788 marking source repository as being upgraded; clients will be unable to read from repository
788 marking source repository as being upgraded; clients will be unable to read from repository
789 starting in-place swap of repository data
789 starting in-place swap of repository data
790 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
790 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
791 replacing store...
791 replacing store...
792 store replacement complete; repository was inconsistent for *s (glob)
792 store replacement complete; repository was inconsistent for *s (glob)
793 finalizing requirements file and making repository readable again
793 finalizing requirements file and making repository readable again
794 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
794 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
795 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
795 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
796 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
796 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
797
797
798 Original requirements backed up
798 Original requirements backed up
799
799
800 $ cat .hg/upgradebackup.*/requires
800 $ cat .hg/upgradebackup.*/requires
801 share-safe
801 share-safe
802 $ cat .hg/upgradebackup.*/store/requires
802 $ cat .hg/upgradebackup.*/store/requires
803 dotencode
803 dotencode
804 fncache
804 fncache
805 persistent-nodemap (rust !)
805 persistent-nodemap (rust !)
806 revlogv1
806 revlogv1
807 store
807 store
808 upgradeinprogress
808 upgradeinprogress
809
809
810 generaldelta added to original requirements files
810 generaldelta added to original requirements files
811
811
812 $ hg debugrequires
812 $ hg debugrequires
813 dotencode
813 dotencode
814 fncache
814 fncache
815 generaldelta
815 generaldelta
816 persistent-nodemap (rust !)
816 persistent-nodemap (rust !)
817 revlogv1
817 revlogv1
818 share-safe
818 share-safe
819 store
819 store
820
820
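For the persistent-nodemap requirement listed above (rust builds), the underlying idea is simply to persist the node -> rev mapping so that a hash lookup does not have to scan the revlog index. The sketch below uses the short changeset hashes created by this test (they appear in the `hg log -G` output further down) and a flat JSON file purely as an illustration; real Mercurial maintains a dedicated binary on-disk format next to the revlog.

  import json

  # node (short form) -> revision number, seeded with this test's changesets.
  nodemap = {
      "ba592bf28da2": 0,
      "2029ce2354e2": 1,
      "fca376863211": 2,
  }

  with open("nodemap.json", "w") as fp:    # "persist" the mapping
      json.dump(nodemap, fp)

  with open("nodemap.json") as fp:         # later: reload instead of rescanning
      reloaded = json.load(fp)

  print(reloaded["fca376863211"])          # 2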
821 store directory has files we expect
821 store directory has files we expect
822
822
823 $ ls .hg/store
823 $ ls .hg/store
824 00changelog.i
824 00changelog.i
825 00manifest.i
825 00manifest.i
826 data
826 data
827 fncache
827 fncache
828 phaseroots
828 phaseroots
829 requires
829 requires
830 undo
830 undo
831 undo.backupfiles
831 undo.backupfiles
832 undo.phaseroots
832 undo.phaseroots
833
833
834 manifest should be generaldelta
834 manifest should be generaldelta
835
835
836 $ hg debugrevlog -m | grep flags
836 $ hg debugrevlog -m | grep flags
837 flags : inline, generaldelta
837 flags : inline, generaldelta
838
838
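The generaldelta flag checked above changes which revision a delta is computed against: without it the base is simply the previous revision in storage order, with it the base can be a parent. A toy comparison follows; delta_size is an invented metric that just counts differing lines and is not revlog's delta format.

  def delta_size(base, target):
      # Toy metric: lines that differ, plus lines added or removed.
      base_lines, target_lines = base.splitlines(), target.splitlines()
      changed = sum(1 for a, b in zip(base_lines, target_lines) if a != b)
      return changed + abs(len(base_lines) - len(target_lines))

  rev0 = "shared\n" * 50                      # common ancestor
  rev1 = rev0 + "branch A line\n" * 1000      # stored right before rev2
  rev2 = rev0 + "branch B line\n"             # child of rev0, not of rev1

  print(delta_size(rev1, rev2))   # 1000: must undo all of branch A
  print(delta_size(rev0, rev2))   # 1: generaldelta can pick the real parent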
839 verify should be happy
839 verify should be happy
840
840
841 $ hg verify
841 $ hg verify
842 checking changesets
842 checking changesets
843 checking manifests
843 checking manifests
844 crosschecking files in changesets and manifests
844 crosschecking files in changesets and manifests
845 checking files
845 checking files
846 checked 3 changesets with 3 changes to 3 files
846 checked 3 changesets with 3 changes to 3 files
847
847
848 old store should be backed up
848 old store should be backed up
849
849
850 $ ls -d .hg/upgradebackup.*/
850 $ ls -d .hg/upgradebackup.*/
851 .hg/upgradebackup.*/ (glob)
851 .hg/upgradebackup.*/ (glob)
852 $ ls .hg/upgradebackup.*/store
852 $ ls .hg/upgradebackup.*/store
853 00changelog.i
853 00changelog.i
854 00manifest.i
854 00manifest.i
855 data
855 data
856 fncache
856 fncache
857 phaseroots
857 phaseroots
858 requires
858 requires
859 undo
859 undo
860 undo.backup.fncache
860 undo.backup.fncache
861 undo.backupfiles
861 undo.backupfiles
862 undo.phaseroots
862 undo.phaseroots
863
863
864 unless --no-backup is passed
864 unless --no-backup is passed
865
865
866 $ rm -rf .hg/upgradebackup.*/
866 $ rm -rf .hg/upgradebackup.*/
867 $ hg debugupgraderepo --run --no-backup
867 $ hg debugupgraderepo --run --no-backup
868 note: selecting all-filelogs for processing to change: sparserevlog
868 note: selecting all-filelogs for processing to change: sparserevlog
869 note: selecting all-manifestlogs for processing to change: sparserevlog
869 note: selecting all-manifestlogs for processing to change: sparserevlog
870 note: selecting changelog for processing to change: sparserevlog
870 note: selecting changelog for processing to change: sparserevlog
871
871
872 upgrade will perform the following actions:
872 upgrade will perform the following actions:
873
873
874 requirements
874 requirements
875 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
875 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
876 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
876 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
877 added: sparserevlog
877 added: sparserevlog
878
878
879 sparserevlog
879 sparserevlog
880 Revlog supports delta chains with more unused data between payloads. These gaps will be skipped at read time. This allows for better delta chains, yielding better compression and faster exchanges with the server.
880 Revlog supports delta chains with more unused data between payloads. These gaps will be skipped at read time. This allows for better delta chains, yielding better compression and faster exchanges with the server.
881
881
882 processed revlogs:
882 processed revlogs:
883 - all-filelogs
883 - all-filelogs
884 - changelog
884 - changelog
885 - manifest
885 - manifest
886
886
887 beginning upgrade...
887 beginning upgrade...
888 repository locked and read-only
888 repository locked and read-only
889 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
889 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
890 (it is safe to interrupt this process any time before data migration completes)
890 (it is safe to interrupt this process any time before data migration completes)
891 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
891 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
892 migrating 519 KB in store; 1.05 MB tracked data
892 migrating 519 KB in store; 1.05 MB tracked data
893 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
893 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
894 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
894 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
895 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
895 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
896 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
896 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
897 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
897 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
898 finished migrating 3 changelog revisions; change in size: 0 bytes
898 finished migrating 3 changelog revisions; change in size: 0 bytes
899 finished migrating 9 total revisions; total change in store size: 0 bytes
899 finished migrating 9 total revisions; total change in store size: 0 bytes
900 copying phaseroots
900 copying phaseroots
901 copying requires
901 copying requires
902 data fully upgraded in a temporary repository
902 data fully upgraded in a temporary repository
903 marking source repository as being upgraded; clients will be unable to read from repository
903 marking source repository as being upgraded; clients will be unable to read from repository
904 starting in-place swap of repository data
904 starting in-place swap of repository data
905 replacing store...
905 replacing store...
906 store replacement complete; repository was inconsistent for * (glob)
906 store replacement complete; repository was inconsistent for * (glob)
907 finalizing requirements file and making repository readable again
907 finalizing requirements file and making repository readable again
908 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
908 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
909 $ ls -1 .hg/ | grep upgradebackup
909 $ ls -1 .hg/ | grep upgradebackup
910 [1]
910 [1]
911
911
912 We can restrict optimization to a subset of revlogs:
912 We can restrict optimization to a subset of revlogs:
913
913
914 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
914 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
915 upgrade will perform the following actions:
915 upgrade will perform the following actions:
916
916
917 requirements
917 requirements
918 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
918 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
919 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
919 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
920
920
921 optimisations: re-delta-parent
921 optimisations: re-delta-parent
922
922
923 re-delta-parent
923 re-delta-parent
924 deltas within internal storage will choose a new base revision if needed
924 deltas within internal storage will choose a new base revision if needed
925
925
926 processed revlogs:
926 processed revlogs:
927 - manifest
927 - manifest
928
928
929 beginning upgrade...
929 beginning upgrade...
930 repository locked and read-only
930 repository locked and read-only
931 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
931 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
932 (it is safe to interrupt this process any time before data migration completes)
932 (it is safe to interrupt this process any time before data migration completes)
933 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
933 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
934 migrating 519 KB in store; 1.05 MB tracked data
934 migrating 519 KB in store; 1.05 MB tracked data
935 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
935 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
936 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
936 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
937 blindly copying data/f0.i containing 1 revisions
937 blindly copying data/f0.i containing 1 revisions
938 blindly copying data/f2.i containing 1 revisions
938 blindly copying data/f2.i containing 1 revisions
939 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
939 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
940 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
940 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
941 cloning 3 revisions from 00manifest.i
941 cloning 3 revisions from 00manifest.i
942 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
942 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
943 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
943 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
944 blindly copying 00changelog.i containing 3 revisions
944 blindly copying 00changelog.i containing 3 revisions
945 finished migrating 3 changelog revisions; change in size: 0 bytes
945 finished migrating 3 changelog revisions; change in size: 0 bytes
946 finished migrating 9 total revisions; total change in store size: 0 bytes
946 finished migrating 9 total revisions; total change in store size: 0 bytes
947 copying phaseroots
947 copying phaseroots
948 copying requires
948 copying requires
949 data fully upgraded in a temporary repository
949 data fully upgraded in a temporary repository
950 marking source repository as being upgraded; clients will be unable to read from repository
950 marking source repository as being upgraded; clients will be unable to read from repository
951 starting in-place swap of repository data
951 starting in-place swap of repository data
952 replacing store...
952 replacing store...
953 store replacement complete; repository was inconsistent for *s (glob)
953 store replacement complete; repository was inconsistent for *s (glob)
954 finalizing requirements file and making repository readable again
954 finalizing requirements file and making repository readable again
955 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
955 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
956
956
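To make the "choose a new base revision" wording from the run above concrete: each revision in a revlog stores either a full text or a patch against a delta base, and reading a revision means applying the chain of patches down from a full text. The toy patches below are (start, old_length, replacement) triples invented for the illustration; they are not revlog's binary patch format.

  def apply_patch(text, patches):
      # Apply (start, old_length, replacement) hunks to a string.
      out, pos = [], 0
      for start, old_len, replacement in patches:
          out.append(text[pos:start])
          out.append(replacement)
          pos = start + old_len
      out.append(text[pos:])
      return "".join(out)

  full_text = {0: "a\nb\nc\n"}                    # revision 0 stored in full
  chain = {
      1: (0, [(4, 2, "C\n")]),                    # rev 1: delta against rev 0
      2: (1, [(0, 2, "A\n")]),                    # rev 2: delta against rev 1
  }

  def read_revision(rev):
      if rev in full_text:
          return full_text[rev]
      base, patches = chain[rev]
      return apply_patch(read_revision(base), patches)

  print(repr(read_revision(2)))                   # 'A\nb\nC\n'

In these terms, re-delta-parent re-evaluates the chain entries so that each revision's base is (usually) one of its parents, keeping chains short and patches small.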
957 Check that the repo still works fine
957 Check that the repo still works fine
958
958
959 $ hg log -G --stat
959 $ hg log -G --stat
960 @ changeset: 2:fca376863211 (py3 !)
960 @ changeset: 2:fca376863211 (py3 !)
961 | tag: tip
961 | tag: tip
962 | parent: 0:ba592bf28da2
962 | parent: 0:ba592bf28da2
963 | user: test
963 | user: test
964 | date: Thu Jan 01 00:00:00 1970 +0000
964 | date: Thu Jan 01 00:00:00 1970 +0000
965 | summary: add f2
965 | summary: add f2
966 |
966 |
967 | f2 | 100000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
967 | f2 | 100000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
968 | 1 files changed, 100000 insertions(+), 0 deletions(-)
968 | 1 files changed, 100000 insertions(+), 0 deletions(-)
969 |
969 |
970 | o changeset: 1:2029ce2354e2
970 | o changeset: 1:2029ce2354e2
971 |/ user: test
971 |/ user: test
972 | date: Thu Jan 01 00:00:00 1970 +0000
972 | date: Thu Jan 01 00:00:00 1970 +0000
973 | summary: add f1
973 | summary: add f1
974 |
974 |
975 |
975 |
976 o changeset: 0:ba592bf28da2
976 o changeset: 0:ba592bf28da2
977 user: test
977 user: test
978 date: Thu Jan 01 00:00:00 1970 +0000
978 date: Thu Jan 01 00:00:00 1970 +0000
979 summary: initial
979 summary: initial
980
980
981
981
982
982
983 $ hg verify
983 $ hg verify
984 checking changesets
984 checking changesets
985 checking manifests
985 checking manifests
986 crosschecking files in changesets and manifests
986 crosschecking files in changesets and manifests
987 checking files
987 checking files
988 checked 3 changesets with 3 changes to 3 files
988 checked 3 changesets with 3 changes to 3 files
989
989
990 Check we can select negatively
990 Check we can select negatively
991
991
992 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
992 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
993 upgrade will perform the following actions:
993 upgrade will perform the following actions:
994
994
995 requirements
995 requirements
996 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
996 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
997 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
997 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
998
998
999 optimisations: re-delta-parent
999 optimisations: re-delta-parent
1000
1000
1001 re-delta-parent
1001 re-delta-parent
1002 deltas within internal storage will choose a new base revision if needed
1002 deltas within internal storage will choose a new base revision if needed
1003
1003
1004 processed revlogs:
1004 processed revlogs:
1005 - all-filelogs
1005 - all-filelogs
1006 - changelog
1006 - changelog
1007
1007
1008 beginning upgrade...
1008 beginning upgrade...
1009 repository locked and read-only
1009 repository locked and read-only
1010 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1010 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1011 (it is safe to interrupt this process any time before data migration completes)
1011 (it is safe to interrupt this process any time before data migration completes)
1012 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1012 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1013 migrating 519 KB in store; 1.05 MB tracked data
1013 migrating 519 KB in store; 1.05 MB tracked data
1014 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1014 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1015 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1015 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1016 cloning 1 revisions from data/f0.i
1016 cloning 1 revisions from data/f0.i
1017 cloning 1 revisions from data/f2.i
1017 cloning 1 revisions from data/f2.i
1018 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1018 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1019 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1019 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1020 blindly copying 00manifest.i containing 3 revisions
1020 blindly copying 00manifest.i containing 3 revisions
1021 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1021 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1022 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1022 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1023 cloning 3 revisions from 00changelog.i
1023 cloning 3 revisions from 00changelog.i
1024 finished migrating 3 changelog revisions; change in size: 0 bytes
1024 finished migrating 3 changelog revisions; change in size: 0 bytes
1025 finished migrating 9 total revisions; total change in store size: 0 bytes
1025 finished migrating 9 total revisions; total change in store size: 0 bytes
1026 copying phaseroots
1026 copying phaseroots
1027 copying requires
1027 copying requires
1028 data fully upgraded in a temporary repository
1028 data fully upgraded in a temporary repository
1029 marking source repository as being upgraded; clients will be unable to read from repository
1029 marking source repository as being upgraded; clients will be unable to read from repository
1030 starting in-place swap of repository data
1030 starting in-place swap of repository data
1031 replacing store...
1031 replacing store...
1032 store replacement complete; repository was inconsistent for *s (glob)
1032 store replacement complete; repository was inconsistent for *s (glob)
1033 finalizing requirements file and making repository readable again
1033 finalizing requirements file and making repository readable again
1034 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1034 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1035 $ hg verify
1035 $ hg verify
1036 checking changesets
1036 checking changesets
1037 checking manifests
1037 checking manifests
1038 crosschecking files in changesets and manifests
1038 crosschecking files in changesets and manifests
1039 checking files
1039 checking files
1040 checked 3 changesets with 3 changes to 3 files
1040 checked 3 changesets with 3 changes to 3 files
1041
1041
1042 Check that we can select changelog only
1042 Check that we can select changelog only
1043
1043
1044 $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
1044 $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
1045 upgrade will perform the following actions:
1045 upgrade will perform the following actions:
1046
1046
1047 requirements
1047 requirements
1048 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1048 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1049 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1049 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1050
1050
1051 optimisations: re-delta-parent
1051 optimisations: re-delta-parent
1052
1052
1053 re-delta-parent
1053 re-delta-parent
1054 deltas within internal storage will choose a new base revision if needed
1054 deltas within internal storage will choose a new base revision if needed
1055
1055
1056 processed revlogs:
1056 processed revlogs:
1057 - changelog
1057 - changelog
1058
1058
1059 beginning upgrade...
1059 beginning upgrade...
1060 repository locked and read-only
1060 repository locked and read-only
1061 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1061 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1062 (it is safe to interrupt this process any time before data migration completes)
1062 (it is safe to interrupt this process any time before data migration completes)
1063 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1063 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1064 migrating 519 KB in store; 1.05 MB tracked data
1064 migrating 519 KB in store; 1.05 MB tracked data
1065 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1065 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1066 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
1066 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
1067 blindly copying data/f0.i containing 1 revisions
1067 blindly copying data/f0.i containing 1 revisions
1068 blindly copying data/f2.i containing 1 revisions
1068 blindly copying data/f2.i containing 1 revisions
1069 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1069 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1070 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1070 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1071 blindly copying 00manifest.i containing 3 revisions
1071 blindly copying 00manifest.i containing 3 revisions
1072 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1072 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1073 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1073 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1074 cloning 3 revisions from 00changelog.i
1074 cloning 3 revisions from 00changelog.i
1075 finished migrating 3 changelog revisions; change in size: 0 bytes
1075 finished migrating 3 changelog revisions; change in size: 0 bytes
1076 finished migrating 9 total revisions; total change in store size: 0 bytes
1076 finished migrating 9 total revisions; total change in store size: 0 bytes
1077 copying phaseroots
1077 copying phaseroots
1078 copying requires
1078 copying requires
1079 data fully upgraded in a temporary repository
1079 data fully upgraded in a temporary repository
1080 marking source repository as being upgraded; clients will be unable to read from repository
1080 marking source repository as being upgraded; clients will be unable to read from repository
1081 starting in-place swap of repository data
1081 starting in-place swap of repository data
1082 replacing store...
1082 replacing store...
1083 store replacement complete; repository was inconsistent for *s (glob)
1083 store replacement complete; repository was inconsistent for *s (glob)
1084 finalizing requirements file and making repository readable again
1084 finalizing requirements file and making repository readable again
1085 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1085 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1086 $ hg verify
1086 $ hg verify
1087 checking changesets
1087 checking changesets
1088 checking manifests
1088 checking manifests
1089 crosschecking files in changesets and manifests
1089 crosschecking files in changesets and manifests
1090 checking files
1090 checking files
1091 checked 3 changesets with 3 changes to 3 files
1091 checked 3 changesets with 3 changes to 3 files
1092
1092
1093 Check that we can select filelog only
1093 Check that we can select filelog only
1094
1094
1095 $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
1095 $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
1096 upgrade will perform the following actions:
1096 upgrade will perform the following actions:
1097
1097
1098 requirements
1098 requirements
1099 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1099 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1100 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1100 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1101
1101
1102 optimisations: re-delta-parent
1102 optimisations: re-delta-parent
1103
1103
1104 re-delta-parent
1104 re-delta-parent
1105 deltas within internal storage will choose a new base revision if needed
1105 deltas within internal storage will choose a new base revision if needed
1106
1106
1107 processed revlogs:
1107 processed revlogs:
1108 - all-filelogs
1108 - all-filelogs
1109
1109
1110 beginning upgrade...
1110 beginning upgrade...
1111 repository locked and read-only
1111 repository locked and read-only
1112 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1112 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1113 (it is safe to interrupt this process any time before data migration completes)
1113 (it is safe to interrupt this process any time before data migration completes)
1114 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1114 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1115 migrating 519 KB in store; 1.05 MB tracked data
1115 migrating 519 KB in store; 1.05 MB tracked data
1116 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1116 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1117 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1117 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1118 cloning 1 revisions from data/f0.i
1118 cloning 1 revisions from data/f0.i
1119 cloning 1 revisions from data/f2.i
1119 cloning 1 revisions from data/f2.i
1120 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1120 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1121 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1121 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1122 blindly copying 00manifest.i containing 3 revisions
1122 blindly copying 00manifest.i containing 3 revisions
1123 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1123 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1124 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1124 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1125 blindly copying 00changelog.i containing 3 revisions
1125 blindly copying 00changelog.i containing 3 revisions
1126 finished migrating 3 changelog revisions; change in size: 0 bytes
1126 finished migrating 3 changelog revisions; change in size: 0 bytes
1127 finished migrating 9 total revisions; total change in store size: 0 bytes
1127 finished migrating 9 total revisions; total change in store size: 0 bytes
1128 copying phaseroots
1128 copying phaseroots
1129 copying requires
1129 copying requires
1130 data fully upgraded in a temporary repository
1130 data fully upgraded in a temporary repository
1131 marking source repository as being upgraded; clients will be unable to read from repository
1131 marking source repository as being upgraded; clients will be unable to read from repository
1132 starting in-place swap of repository data
1132 starting in-place swap of repository data
1133 replacing store...
1133 replacing store...
1134 store replacement complete; repository was inconsistent for *s (glob)
1134 store replacement complete; repository was inconsistent for *s (glob)
1135 finalizing requirements file and making repository readable again
1135 finalizing requirements file and making repository readable again
1136 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1136 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1137 $ hg verify
1137 $ hg verify
1138 checking changesets
1138 checking changesets
1139 checking manifests
1139 checking manifests
1140 crosschecking files in changesets and manifests
1140 crosschecking files in changesets and manifests
1141 checking files
1141 checking files
1142 checked 3 changesets with 3 changes to 3 files
1142 checked 3 changesets with 3 changes to 3 files
1143
1143
1144
1144
1145 Check you can't skip revlog clone during important format downgrade
1145 Check you can't skip revlog clone during important format downgrade
1146
1146
1147 $ echo "[format]" > .hg/hgrc
1147 $ echo "[format]" > .hg/hgrc
1148 $ echo "sparse-revlog=no" >> .hg/hgrc
1148 $ echo "sparse-revlog=no" >> .hg/hgrc
1149 $ hg debugupgrade --optimize re-delta-parent --no-manifest --no-backup --quiet
1149 $ hg debugupgrade --optimize re-delta-parent --no-manifest --no-backup --quiet
1150 warning: ignoring --no-manifest, as upgrade is changing: sparserevlog
1150 warning: ignoring --no-manifest, as upgrade is changing: sparserevlog
1151
1151
1152 requirements
1152 requirements
1153 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1153 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1154 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1154 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1155 removed: sparserevlog
1155 removed: sparserevlog
1156
1156
1157 optimisations: re-delta-parent
1157 optimisations: re-delta-parent
1158
1158
1159 processed revlogs:
1159 processed revlogs:
1160 - all-filelogs
1160 - all-filelogs
1161 - changelog
1161 - changelog
1162 - manifest
1162 - manifest
1163
1163
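The "warning: ignoring --no-manifest ..." note above reflects a simple rule: when the upgrade changes a storage requirement (here sparserevlog), every revlog must be rewritten, so per-revlog selections are overridden. Below is a schematic version of that rule with invented names; the real logic lives in Mercurial's upgrade code and is more involved.

  def revlogs_to_process(requested, requirement_changes):
      # If any requirement is added or removed, force processing of every
      # revlog and warn about the selections that get ignored.
      all_revlogs = {"all-filelogs", "changelog", "manifest"}
      if requirement_changes:
          for name in sorted(all_revlogs - requested):
              print("warning: ignoring --no-%s, as upgrade is changing: %s"
                    % (name, ", ".join(sorted(requirement_changes))))
          return all_revlogs
      return requested

  print(sorted(revlogs_to_process({"all-filelogs", "changelog"},
                                  {"sparserevlog"})))
  # warning: ignoring --no-manifest, as upgrade is changing: sparserevlog
  # ['all-filelogs', 'changelog', 'manifest']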
1164 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
1164 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
1165 note: selecting all-filelogs for processing to change: sparserevlog
1165 note: selecting all-filelogs for processing to change: sparserevlog
1166 note: selecting changelog for processing to change: sparserevlog
1166 note: selecting changelog for processing to change: sparserevlog
1167
1167
1168 upgrade will perform the following actions:
1168 upgrade will perform the following actions:
1169
1169
1170 requirements
1170 requirements
1171 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1171 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1172 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1172 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1173 removed: sparserevlog
1173 removed: sparserevlog
1174
1174
1175 optimisations: re-delta-parent
1175 optimisations: re-delta-parent
1176
1176
1177 re-delta-parent
1177 re-delta-parent
1178 deltas within internal storage will choose a new base revision if needed
1178 deltas within internal storage will choose a new base revision if needed
1179
1179
1180 processed revlogs:
1180 processed revlogs:
1181 - all-filelogs
1181 - all-filelogs
1182 - changelog
1182 - changelog
1183 - manifest
1183 - manifest
1184
1184
1185 beginning upgrade...
1185 beginning upgrade...
1186 repository locked and read-only
1186 repository locked and read-only
1187 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1187 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1188 (it is safe to interrupt this process any time before data migration completes)
1188 (it is safe to interrupt this process any time before data migration completes)
1189 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1189 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1190 migrating 519 KB in store; 1.05 MB tracked data
1190 migrating 519 KB in store; 1.05 MB tracked data
1191 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1191 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1192 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1192 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1193 cloning 1 revisions from data/f0.i
1193 cloning 1 revisions from data/f0.i
1194 cloning 1 revisions from data/f2.i
1194 cloning 1 revisions from data/f2.i
1195 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1195 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1196 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1196 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1197 cloning 3 revisions from 00manifest.i
1197 cloning 3 revisions from 00manifest.i
1198 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1198 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1199 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1199 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1200 cloning 3 revisions from 00changelog.i
1200 cloning 3 revisions from 00changelog.i
1201 finished migrating 3 changelog revisions; change in size: 0 bytes
1201 finished migrating 3 changelog revisions; change in size: 0 bytes
1202 finished migrating 9 total revisions; total change in store size: 0 bytes
1202 finished migrating 9 total revisions; total change in store size: 0 bytes
1203 copying phaseroots
1203 copying phaseroots
1204 copying requires
1204 copying requires
1205 data fully upgraded in a temporary repository
1205 data fully upgraded in a temporary repository
1206 marking source repository as being upgraded; clients will be unable to read from repository
1206 marking source repository as being upgraded; clients will be unable to read from repository
1207 starting in-place swap of repository data
1207 starting in-place swap of repository data
1208 replacing store...
1208 replacing store...
1209 store replacement complete; repository was inconsistent for *s (glob)
1209 store replacement complete; repository was inconsistent for *s (glob)
1210 finalizing requirements file and making repository readable again
1210 finalizing requirements file and making repository readable again
1211 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1211 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1212 $ hg verify
1212 $ hg verify
1213 checking changesets
1213 checking changesets
1214 checking manifests
1214 checking manifests
1215 crosschecking files in changesets and manifests
1215 crosschecking files in changesets and manifests
1216 checking files
1216 checking files
1217 checked 3 changesets with 3 changes to 3 files
1217 checked 3 changesets with 3 changes to 3 files
1218
1218
1219 Check you can't skip revlog clone during important format upgrade
1219 Check you can't skip revlog clone during important format upgrade
1220
1220
1221 $ echo "sparse-revlog=yes" >> .hg/hgrc
1221 $ echo "sparse-revlog=yes" >> .hg/hgrc
1222 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
1222 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
1223 note: selecting all-filelogs for processing to change: sparserevlog
1223 note: selecting all-filelogs for processing to change: sparserevlog
1224 note: selecting changelog for processing to change: sparserevlog
1224 note: selecting changelog for processing to change: sparserevlog
1225
1225
1226 upgrade will perform the following actions:
1226 upgrade will perform the following actions:
1227
1227
1228 requirements
1228 requirements
1229 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1229 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1230 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1230 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1231 added: sparserevlog
1231 added: sparserevlog
1232
1232
1233 optimisations: re-delta-parent
1233 optimisations: re-delta-parent
1234
1234
1235 sparserevlog
1235 sparserevlog
1236 Revlog supports delta chains with more unused data between payloads. These gaps will be skipped at read time. This allows for better delta chains, yielding better compression and faster exchanges with the server.
1236 Revlog supports delta chains with more unused data between payloads. These gaps will be skipped at read time. This allows for better delta chains, yielding better compression and faster exchanges with the server.
1237
1237
1238 re-delta-parent
1238 re-delta-parent
1239 deltas within internal storage will choose a new base revision if needed
1239 deltas within internal storage will choose a new base revision if needed
1240
1240
1241 processed revlogs:
1241 processed revlogs:
1242 - all-filelogs
1242 - all-filelogs
1243 - changelog
1243 - changelog
1244 - manifest
1244 - manifest
1245
1245
1246 beginning upgrade...
1246 beginning upgrade...
1247 repository locked and read-only
1247 repository locked and read-only
1248 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1248 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1249 (it is safe to interrupt this process any time before data migration completes)
1249 (it is safe to interrupt this process any time before data migration completes)
1250 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1250 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1251 migrating 519 KB in store; 1.05 MB tracked data
1251 migrating 519 KB in store; 1.05 MB tracked data
1252 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1252 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1253 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1253 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1254 cloning 1 revisions from data/f0.i
1254 cloning 1 revisions from data/f0.i
1255 cloning 1 revisions from data/f2.i
1255 cloning 1 revisions from data/f2.i
1256 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1256 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1257 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1257 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1258 cloning 3 revisions from 00manifest.i
1258 cloning 3 revisions from 00manifest.i
1259 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1259 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1260 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1260 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1261 cloning 3 revisions from 00changelog.i
1261 cloning 3 revisions from 00changelog.i
1262 finished migrating 3 changelog revisions; change in size: 0 bytes
1262 finished migrating 3 changelog revisions; change in size: 0 bytes
1263 finished migrating 9 total revisions; total change in store size: 0 bytes
1263 finished migrating 9 total revisions; total change in store size: 0 bytes
1264 copying phaseroots
1264 copying phaseroots
1265 copying requires
1265 copying requires
1266 data fully upgraded in a temporary repository
1266 data fully upgraded in a temporary repository
1267 marking source repository as being upgraded; clients will be unable to read from repository
1267 marking source repository as being upgraded; clients will be unable to read from repository
1268 starting in-place swap of repository data
1268 starting in-place swap of repository data
1269 replacing store...
1269 replacing store...
1270 store replacement complete; repository was inconsistent for *s (glob)
1270 store replacement complete; repository was inconsistent for *s (glob)
1271 finalizing requirements file and making repository readable again
1271 finalizing requirements file and making repository readable again
1272 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1272 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1273 $ hg verify
1273 $ hg verify
1274 checking changesets
1274 checking changesets
1275 checking manifests
1275 checking manifests
1276 crosschecking files in changesets and manifests
1276 crosschecking files in changesets and manifests
1277 checking files
1277 checking files
1278 checked 3 changesets with 3 changes to 3 files
1278 checked 3 changesets with 3 changes to 3 files
1279
1279
1280 $ cd ..
1280 $ cd ..
1281
1281
1282 store files with special filenames aren't encoded during copy
1282 store files with special filenames aren't encoded during copy
1283
1283
1284 $ hg init store-filenames
1284 $ hg init store-filenames
1285 $ cd store-filenames
1285 $ cd store-filenames
1286 $ touch foo
1286 $ touch foo
1287 $ hg -q commit -A -m initial
1287 $ hg -q commit -A -m initial
1288 $ touch .hg/store/.XX_special_filename
1288 $ touch .hg/store/.XX_special_filename
1289
1289
1290 $ hg debugupgraderepo --run
1290 $ hg debugupgraderepo --run
1291 nothing to do
1291 nothing to do
1292 $ hg debugupgraderepo --run --optimize 're-delta-fulladd'
1292 $ hg debugupgraderepo --run --optimize 're-delta-fulladd'
1293 upgrade will perform the following actions:
1293 upgrade will perform the following actions:
1294
1294
1295 requirements
1295 requirements
1296 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1296 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1297 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1297 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1298
1298
1299 optimisations: re-delta-fulladd
1299 optimisations: re-delta-fulladd
1300
1300
1301 re-delta-fulladd
1301 re-delta-fulladd
1302 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
1302 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
1303
1303
1304 processed revlogs:
1304 processed revlogs:
1305 - all-filelogs
1305 - all-filelogs
1306 - changelog
1306 - changelog
1307 - manifest
1307 - manifest
1308
1308
1309 beginning upgrade...
1309 beginning upgrade...
1310 repository locked and read-only
1310 repository locked and read-only
1311 creating temporary repository to stage upgraded data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1311 creating temporary repository to stage upgraded data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1312 (it is safe to interrupt this process any time before data migration completes)
1312 (it is safe to interrupt this process any time before data migration completes)
1313 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1313 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1314 migrating 301 bytes in store; 107 bytes tracked data
1314 migrating 301 bytes in store; 107 bytes tracked data
1315 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1315 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1316 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1316 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1317 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1317 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1318 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1318 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1319 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1319 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1320 finished migrating 1 changelog revisions; change in size: 0 bytes
1320 finished migrating 1 changelog revisions; change in size: 0 bytes
1321 finished migrating 3 total revisions; total change in store size: 0 bytes
1321 finished migrating 3 total revisions; total change in store size: 0 bytes
1322 copying .XX_special_filename
1322 copying .XX_special_filename
1323 copying phaseroots
1323 copying phaseroots
1324 copying requires
1324 copying requires
1325 data fully upgraded in a temporary repository
1325 data fully upgraded in a temporary repository
1326 marking source repository as being upgraded; clients will be unable to read from repository
1326 marking source repository as being upgraded; clients will be unable to read from repository
1327 starting in-place swap of repository data
1327 starting in-place swap of repository data
1328 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1328 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1329 replacing store...
1329 replacing store...
1330 store replacement complete; repository was inconsistent for *s (glob)
1330 store replacement complete; repository was inconsistent for *s (glob)
1331 finalizing requirements file and making repository readable again
1331 finalizing requirements file and making repository readable again
1332 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1332 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1333 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1333 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1334 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1334 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1335
1335
1336 fncache is valid after upgrade
1336 fncache is valid after upgrade
1337
1337
1338 $ hg debugrebuildfncache
1338 $ hg debugrebuildfncache
1339 fncache already up to date
1339 fncache already up to date
1340
1340
1341 $ cd ..
1341 $ cd ..
1342
1342
1343 Check upgrading a large file repository
1343 Check upgrading a large file repository
1344 ---------------------------------------
1344 ---------------------------------------
1345
1345
1346 $ hg init largefilesrepo
1346 $ hg init largefilesrepo
1347 $ cat << EOF >> largefilesrepo/.hg/hgrc
1347 $ cat << EOF >> largefilesrepo/.hg/hgrc
1348 > [extensions]
1348 > [extensions]
1349 > largefiles =
1349 > largefiles =
1350 > EOF
1350 > EOF
1351
1351
1352 $ cd largefilesrepo
1352 $ cd largefilesrepo
1353 $ touch foo
1353 $ touch foo
1354 $ hg add --large foo
1354 $ hg add --large foo
1355 $ hg -q commit -m initial
1355 $ hg -q commit -m initial
1356 $ hg debugrequires
1356 $ hg debugrequires
1357 dotencode
1357 dotencode
1358 fncache
1358 fncache
1359 generaldelta
1359 generaldelta
1360 largefiles
1360 largefiles
1361 persistent-nodemap (rust !)
1361 persistent-nodemap (rust !)
1362 revlogv1
1362 revlogv1
1363 share-safe
1363 share-safe
1364 sparserevlog
1364 sparserevlog
1365 store
1365 store
1366
1366
1367 $ hg debugupgraderepo --run
1367 $ hg debugupgraderepo --run
1368 nothing to do
1368 nothing to do
1369 $ hg debugrequires
1369 $ hg debugrequires
1370 dotencode
1370 dotencode
1371 fncache
1371 fncache
1372 generaldelta
1372 generaldelta
1373 largefiles
1373 largefiles
1374 persistent-nodemap (rust !)
1374 persistent-nodemap (rust !)
1375 revlogv1
1375 revlogv1
1376 share-safe
1376 share-safe
1377 sparserevlog
1377 sparserevlog
1378 store
1378 store
1379
1379
1380 $ cat << EOF >> .hg/hgrc
1380 $ cat << EOF >> .hg/hgrc
1381 > [extensions]
1381 > [extensions]
1382 > lfs =
1382 > lfs =
1383 > [lfs]
1383 > [lfs]
1384 > threshold = 10
1384 > threshold = 10
1385 > EOF
1385 > EOF
1386 $ echo '123456789012345' > lfs.bin
1386 $ echo '123456789012345' > lfs.bin
1387 $ hg ci -Am 'lfs.bin'
1387 $ hg ci -Am 'lfs.bin'
1388 adding lfs.bin
1388 adding lfs.bin
1389 $ hg debugrequires | grep lfs
1389 $ hg debugrequires | grep lfs
1390 lfs
1390 lfs
1391 $ find .hg/store/lfs -type f
1391 $ find .hg/store/lfs -type f
1392 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1392 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1393
1393
1394 $ hg debugupgraderepo --run
1394 $ hg debugupgraderepo --run
1395 nothing to do
1395 nothing to do
1396
1396
1397 $ hg debugrequires | grep lfs
1397 $ hg debugrequires | grep lfs
1398 lfs
1398 lfs
1399 $ find .hg/store/lfs -type f
1399 $ find .hg/store/lfs -type f
1400 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1400 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1401 $ hg verify
1401 $ hg verify
1402 checking changesets
1402 checking changesets
1403 checking manifests
1403 checking manifests
1404 crosschecking files in changesets and manifests
1404 crosschecking files in changesets and manifests
1405 checking files
1405 checking files
1406 checked 2 changesets with 2 changes to 2 files
1406 checked 2 changesets with 2 changes to 2 files
1407 $ hg debugdata lfs.bin 0
1407 $ hg debugdata lfs.bin 0
1408 version https://git-lfs.github.com/spec/v1
1408 version https://git-lfs.github.com/spec/v1
1409 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1409 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1410 size 16
1410 size 16
1411 x-is-binary 0
1411 x-is-binary 0
1412
1412
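The `hg debugdata lfs.bin 0` output above shows that the filelog only stores a small LFS pointer, while the 16-byte blob itself lives under `.hg/store/lfs/objects/` at a path derived from the oid. As a minimal sketch of that pointer layout (an illustrative parser built from the lines shown above, not Mercurial's or git-lfs's own code):

    # Toy parser for the LFS pointer text printed by `hg debugdata lfs.bin 0`
    # above.  It simply splits the "key value" lines shown in that output.
    pointer_lines = [
        "version https://git-lfs.github.com/spec/v1",
        "oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f",
        "size 16",
        "x-is-binary 0",
    ]

    def parse_pointer(lines):
        fields = {}
        for line in lines:
            key, _, value = line.partition(' ')
            fields[key] = value
        return fields

    p = parse_pointer(pointer_lines)
    assert p['oid'].startswith('sha256:')
    assert int(p['size']) == 16      # "123456789012345" plus the newline echo added
    oid = p['oid'].split(':', 1)[1]
    # The blob itself lives at .hg/store/lfs/objects/<oid[:2]>/<oid[2:]>,
    # matching the `find .hg/store/lfs -type f` output above.
    print(oid[:2] + '/' + oid[2:])
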
1413 $ cd ..
1413 $ cd ..
1414
1414
1415 repository config is taken into account
1415 repository config is taken into account
1416 ---------------------------------------
1416 ---------------------------------------
1417
1417
1418 $ cat << EOF >> $HGRCPATH
1418 $ cat << EOF >> $HGRCPATH
1419 > [format]
1419 > [format]
1420 > maxchainlen = 1
1420 > maxchainlen = 1
1421 > EOF
1421 > EOF
1422
1422
1423 $ hg init localconfig
1423 $ hg init localconfig
1424 $ cd localconfig
1424 $ cd localconfig
1425 $ cat << EOF > file
1425 $ cat << EOF > file
1426 > some content
1426 > some content
1427 > with some length
1427 > with some length
1428 > to make sure we get a delta
1428 > to make sure we get a delta
1429 > after changes
1429 > after changes
1430 > very long
1430 > very long
1431 > very long
1431 > very long
1432 > very long
1432 > very long
1433 > very long
1433 > very long
1434 > very long
1434 > very long
1435 > very long
1435 > very long
1436 > very long
1436 > very long
1437 > very long
1437 > very long
1438 > very long
1438 > very long
1439 > very long
1439 > very long
1440 > very long
1440 > very long
1441 > EOF
1441 > EOF
1442 $ hg -q commit -A -m A
1442 $ hg -q commit -A -m A
1443 $ echo "new line" >> file
1443 $ echo "new line" >> file
1444 $ hg -q commit -m B
1444 $ hg -q commit -m B
1445 $ echo "new line" >> file
1445 $ echo "new line" >> file
1446 $ hg -q commit -m C
1446 $ hg -q commit -m C
1447
1447
1448 $ cat << EOF >> .hg/hgrc
1448 $ cat << EOF >> .hg/hgrc
1449 > [format]
1449 > [format]
1450 > maxchainlen = 9001
1450 > maxchainlen = 9001
1451 > EOF
1451 > EOF
1452 $ hg config format
1452 $ hg config format
1453 format.revlog-compression=$BUNDLE2_COMPRESSIONS$
1453 format.revlog-compression=$BUNDLE2_COMPRESSIONS$
1454 format.maxchainlen=9001
1454 format.maxchainlen=9001
1455 $ hg debugdeltachain file
1455 $ hg debugdeltachain file
1456 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1456 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1457 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1457 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1458 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1458 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1459 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
1459 2 1 2 0 snap 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
1460
1460
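This `hg debugdeltachain` run is where the change in this changeset is visible: with `maxchainlen = 1`, revision 2 cannot extend the chain through its parent, so its delta is taken against revision 0, which is not one of its parents. The old output labelled that delta `other`; it is now reported as `snap`, i.e. a snapshot-style delta against another snapshot rather than against a parent. Below is a rough sketch of how such a label can be read off the index fields in the table; `delta_label` and its `is_snapshot` argument are illustrative stand-ins, not Mercurial's actual implementation (which also knows further labels such as `prev`):

    # Illustrative reading of the `delta` column above.
    def delta_label(rev, base, p1, p2, is_snapshot):
        if base in (-1, rev):
            return 'base'    # full text: the start of a delta chain
        if base == p1:
            return 'p1'      # delta against the first parent
        if base == p2:
            return 'p2'      # delta against the second parent
        if is_snapshot:
            return 'snap'    # intermediate snapshot: delta against another snapshot
        return 'other'       # any remaining delta against a non-parent base

    # The three rows of the table above (the parents of filelog rev 2 are
    # (1, -1), but its delta base is rev 0 because maxchainlen=1 cut the
    # chain short):
    print(delta_label(0, base=0, p1=-1, p2=-1, is_snapshot=True))   # base
    print(delta_label(1, base=0, p1=0, p2=-1, is_snapshot=False))   # p1
    print(delta_label(2, base=0, p1=1, p2=-1, is_snapshot=True))    # snap (was: other)
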
1461 $ hg debugupgraderepo --run --optimize 're-delta-all'
1461 $ hg debugupgraderepo --run --optimize 're-delta-all'
1462 upgrade will perform the following actions:
1462 upgrade will perform the following actions:
1463
1463
1464 requirements
1464 requirements
1465 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1465 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1466 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1466 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1467
1467
1468 optimisations: re-delta-all
1468 optimisations: re-delta-all
1469
1469
1470 re-delta-all
1470 re-delta-all
1471 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
1471 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
1472
1472
1473 processed revlogs:
1473 processed revlogs:
1474 - all-filelogs
1474 - all-filelogs
1475 - changelog
1475 - changelog
1476 - manifest
1476 - manifest
1477
1477
1478 beginning upgrade...
1478 beginning upgrade...
1479 repository locked and read-only
1479 repository locked and read-only
1480 creating temporary repository to stage upgraded data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
1480 creating temporary repository to stage upgraded data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
1481 (it is safe to interrupt this process any time before data migration completes)
1481 (it is safe to interrupt this process any time before data migration completes)
1482 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1482 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1483 migrating 1019 bytes in store; 882 bytes tracked data
1483 migrating 1019 bytes in store; 882 bytes tracked data
1484 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
1484 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
1485 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
1485 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
1486 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
1486 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
1487 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1487 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1488 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
1488 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
1489 finished migrating 3 changelog revisions; change in size: 0 bytes
1489 finished migrating 3 changelog revisions; change in size: 0 bytes
1490 finished migrating 9 total revisions; total change in store size: -9 bytes
1490 finished migrating 9 total revisions; total change in store size: -9 bytes
1491 copying phaseroots
1491 copying phaseroots
1492 copying requires
1492 copying requires
1493 data fully upgraded in a temporary repository
1493 data fully upgraded in a temporary repository
1494 marking source repository as being upgraded; clients will be unable to read from repository
1494 marking source repository as being upgraded; clients will be unable to read from repository
1495 starting in-place swap of repository data
1495 starting in-place swap of repository data
1496 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1496 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1497 replacing store...
1497 replacing store...
1498 store replacement complete; repository was inconsistent for *s (glob)
1498 store replacement complete; repository was inconsistent for *s (glob)
1499 finalizing requirements file and making repository readable again
1499 finalizing requirements file and making repository readable again
1500 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
1500 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
1501 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1501 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1502 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1502 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1503 $ hg debugdeltachain file
1503 $ hg debugdeltachain file
1504 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1504 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1505 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1505 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1506 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1506 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1507 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
1507 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
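After `re-delta-all` under the relaxed `maxchainlen`, revision 2 is re-deltaed against its parent, so the chain simply grows to length 3 and the read window contains no unused bytes (the sparse-revlog description earlier is precisely about being able to skip such unused gaps at read time). The derived columns in both tables are plain arithmetic over the per-revision sizes; a small recomputation under my reading of the columns, with `chain_metrics` as a hypothetical helper:

    # deltas : compressed size of each delta in the chain, base first
    # rawsize: full-text size of the revision being rebuilt
    # span   : bytes read from disk to rebuild it (chain plus any gap)
    def chain_metrics(deltas, rawsize, span):
        chainsize = sum(deltas)
        return {
            'chainsize': chainsize,
            'ratio': round(chainsize / rawsize, 5),
            'extradist': span - chainsize,   # unused bytes inside the read window
            'extraratio': round((span - chainsize) / chainsize, 5),
            'rddensity': round(chainsize / span, 5),
        }

    # Rev 2 before the upgrade (first table): its delta base is rev 0, so the
    # 21 bytes of rev 1 sit unused inside the read window.
    before = chain_metrics([77, 30], rawsize=200, span=128)
    # -> chainsize 107, ratio 0.535, extradist 21, extraratio 0.19626,
    #    rddensity 0.83594, matching the first table.

    # Rev 2 after `re-delta-all` (table above): the chain is contiguous.
    after = chain_metrics([77, 21, 21], rawsize=200, span=119)
    # -> chainsize 119, ratio 0.595, extradist 0, rddensity 1.0.

    assert before['chainsize'] == 107 and before['rddensity'] == 0.83594
    assert after['chainsize'] == 119 and after['rddensity'] == 1.0
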
1508 $ cd ..
1508 $ cd ..
1509
1509
1510 $ cat << EOF >> $HGRCPATH
1510 $ cat << EOF >> $HGRCPATH
1511 > [format]
1511 > [format]
1512 > maxchainlen = 9001
1512 > maxchainlen = 9001
1513 > EOF
1513 > EOF
1514
1514
1515 Check upgrading a sparse-revlog repository
1515 Check upgrading a sparse-revlog repository
1516 ------------------------------------------
1516 ------------------------------------------
1517
1517
1518 $ hg init sparserevlogrepo --config format.sparse-revlog=no
1518 $ hg init sparserevlogrepo --config format.sparse-revlog=no
1519 $ cd sparserevlogrepo
1519 $ cd sparserevlogrepo
1520 $ touch foo
1520 $ touch foo
1521 $ hg add foo
1521 $ hg add foo
1522 $ hg -q commit -m "foo"
1522 $ hg -q commit -m "foo"
1523 $ hg debugrequires
1523 $ hg debugrequires
1524 dotencode
1524 dotencode
1525 fncache
1525 fncache
1526 generaldelta
1526 generaldelta
1527 persistent-nodemap (rust !)
1527 persistent-nodemap (rust !)
1528 revlogv1
1528 revlogv1
1529 share-safe
1529 share-safe
1530 store
1530 store
1531
1531
1532 Check that we can add the sparse-revlog format requirement
1532 Check that we can add the sparse-revlog format requirement
1533 $ hg --config format.sparse-revlog=yes debugupgraderepo --run --quiet
1533 $ hg --config format.sparse-revlog=yes debugupgraderepo --run --quiet
1534 upgrade will perform the following actions:
1534 upgrade will perform the following actions:
1535
1535
1536 requirements
1536 requirements
1537 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1537 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1538 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1538 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1539 added: sparserevlog
1539 added: sparserevlog
1540
1540
1541 processed revlogs:
1541 processed revlogs:
1542 - all-filelogs
1542 - all-filelogs
1543 - changelog
1543 - changelog
1544 - manifest
1544 - manifest
1545
1545
1546 $ hg debugrequires
1546 $ hg debugrequires
1547 dotencode
1547 dotencode
1548 fncache
1548 fncache
1549 generaldelta
1549 generaldelta
1550 persistent-nodemap (rust !)
1550 persistent-nodemap (rust !)
1551 revlogv1
1551 revlogv1
1552 share-safe
1552 share-safe
1553 sparserevlog
1553 sparserevlog
1554 store
1554 store
1555
1555
1556 Check that we can remove the sparse-revlog format requirement
1556 Check that we can remove the sparse-revlog format requirement
1557 $ hg --config format.sparse-revlog=no debugupgraderepo --run --quiet
1557 $ hg --config format.sparse-revlog=no debugupgraderepo --run --quiet
1558 upgrade will perform the following actions:
1558 upgrade will perform the following actions:
1559
1559
1560 requirements
1560 requirements
1561 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1561 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1562 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1562 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1563 removed: sparserevlog
1563 removed: sparserevlog
1564
1564
1565 processed revlogs:
1565 processed revlogs:
1566 - all-filelogs
1566 - all-filelogs
1567 - changelog
1567 - changelog
1568 - manifest
1568 - manifest
1569
1569
1570 $ hg debugrequires
1570 $ hg debugrequires
1571 dotencode
1571 dotencode
1572 fncache
1572 fncache
1573 generaldelta
1573 generaldelta
1574 persistent-nodemap (rust !)
1574 persistent-nodemap (rust !)
1575 revlogv1
1575 revlogv1
1576 share-safe
1576 share-safe
1577 store
1577 store
1578
1578
1579 #if zstd
1579 #if zstd
1580
1580
1581 Check upgrading to a zstd revlog
1581 Check upgrading to a zstd revlog
1582 --------------------------------
1582 --------------------------------
1583
1583
1584 upgrade
1584 upgrade
1585
1585
1586 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup --quiet
1586 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup --quiet
1587 upgrade will perform the following actions:
1587 upgrade will perform the following actions:
1588
1588
1589 requirements
1589 requirements
1590 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1590 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1591 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1591 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1592 added: revlog-compression-zstd, sparserevlog
1592 added: revlog-compression-zstd, sparserevlog
1593
1593
1594 processed revlogs:
1594 processed revlogs:
1595 - all-filelogs
1595 - all-filelogs
1596 - changelog
1596 - changelog
1597 - manifest
1597 - manifest
1598
1598
1599 $ hg debugformat -v
1599 $ hg debugformat -v
1600 format-variant repo config default
1600 format-variant repo config default
1601 fncache: yes yes yes
1601 fncache: yes yes yes
1602 dirstate-v2: no no no
1602 dirstate-v2: no no no
1603 tracked-hint: no no no
1603 tracked-hint: no no no
1604 dotencode: yes yes yes
1604 dotencode: yes yes yes
1605 generaldelta: yes yes yes
1605 generaldelta: yes yes yes
1606 share-safe: yes yes yes
1606 share-safe: yes yes yes
1607 sparserevlog: yes yes yes
1607 sparserevlog: yes yes yes
1608 persistent-nodemap: no no no (no-rust !)
1608 persistent-nodemap: no no no (no-rust !)
1609 persistent-nodemap: yes yes no (rust !)
1609 persistent-nodemap: yes yes no (rust !)
1610 copies-sdc: no no no
1610 copies-sdc: no no no
1611 revlog-v2: no no no
1611 revlog-v2: no no no
1612 changelog-v2: no no no
1612 changelog-v2: no no no
1613 plain-cl-delta: yes yes yes
1613 plain-cl-delta: yes yes yes
1614 compression: zlib zlib zlib (no-zstd !)
1614 compression: zlib zlib zlib (no-zstd !)
1615 compression: zstd zlib zstd (zstd !)
1615 compression: zstd zlib zstd (zstd !)
1616 compression-level: default default default
1616 compression-level: default default default
1617 $ hg debugrequires
1617 $ hg debugrequires
1618 dotencode
1618 dotencode
1619 fncache
1619 fncache
1620 generaldelta
1620 generaldelta
1621 persistent-nodemap (rust !)
1621 persistent-nodemap (rust !)
1622 revlog-compression-zstd
1622 revlog-compression-zstd
1623 revlogv1
1623 revlogv1
1624 share-safe
1624 share-safe
1625 sparserevlog
1625 sparserevlog
1626 store
1626 store
1627
1627
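The upgrade above only switches which compression engine is used for newly written revlog data; nothing else about the store changes. For a rough, Mercurial-independent feel of the two engines, a small comparison sketch, assuming the third-party `zstandard` Python package is installed:

    # Rough comparison of the two engines.  zlib is in the standard library;
    # zstd is assumed to come from the `zstandard` package (pip install zstandard).
    import zlib
    import zstandard

    data = b"some content, repeated enough to be compressible\n" * 1000

    zlib_blob = zlib.compress(data)
    zstd_blob = zstandard.ZstdCompressor(level=3).compress(data)

    print(len(data), len(zlib_blob), len(zstd_blob))
    # zstd generally decompresses faster than zlib at a comparable or better
    # ratio, which is why the `default` column of `hg debugformat -v` above
    # shows zstd whenever the bindings are available.
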
1628 downgrade
1628 downgrade
1629
1629
1630 $ hg debugupgraderepo --run --no-backup --quiet
1630 $ hg debugupgraderepo --run --no-backup --quiet
1631 upgrade will perform the following actions:
1631 upgrade will perform the following actions:
1632
1632
1633 requirements
1633 requirements
1634 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1634 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1635 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1635 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1636 removed: revlog-compression-zstd
1636 removed: revlog-compression-zstd
1637
1637
1638 processed revlogs:
1638 processed revlogs:
1639 - all-filelogs
1639 - all-filelogs
1640 - changelog
1640 - changelog
1641 - manifest
1641 - manifest
1642
1642
1643 $ hg debugformat -v
1643 $ hg debugformat -v
1644 format-variant repo config default
1644 format-variant repo config default
1645 fncache: yes yes yes
1645 fncache: yes yes yes
1646 dirstate-v2: no no no
1646 dirstate-v2: no no no
1647 tracked-hint: no no no
1647 tracked-hint: no no no
1648 dotencode: yes yes yes
1648 dotencode: yes yes yes
1649 generaldelta: yes yes yes
1649 generaldelta: yes yes yes
1650 share-safe: yes yes yes
1650 share-safe: yes yes yes
1651 sparserevlog: yes yes yes
1651 sparserevlog: yes yes yes
1652 persistent-nodemap: no no no (no-rust !)
1652 persistent-nodemap: no no no (no-rust !)
1653 persistent-nodemap: yes yes no (rust !)
1653 persistent-nodemap: yes yes no (rust !)
1654 copies-sdc: no no no
1654 copies-sdc: no no no
1655 revlog-v2: no no no
1655 revlog-v2: no no no
1656 changelog-v2: no no no
1656 changelog-v2: no no no
1657 plain-cl-delta: yes yes yes
1657 plain-cl-delta: yes yes yes
1658 compression: zlib zlib zlib (no-zstd !)
1658 compression: zlib zlib zlib (no-zstd !)
1659 compression: zlib zlib zstd (zstd !)
1659 compression: zlib zlib zstd (zstd !)
1660 compression-level: default default default
1660 compression-level: default default default
1661 $ hg debugrequires
1661 $ hg debugrequires
1662 dotencode
1662 dotencode
1663 fncache
1663 fncache
1664 generaldelta
1664 generaldelta
1665 persistent-nodemap (rust !)
1665 persistent-nodemap (rust !)
1666 revlogv1
1666 revlogv1
1667 share-safe
1667 share-safe
1668 sparserevlog
1668 sparserevlog
1669 store
1669 store
1670
1670
1671 upgrade from hgrc
1671 upgrade from hgrc
1672
1672
1673 $ cat >> .hg/hgrc << EOF
1673 $ cat >> .hg/hgrc << EOF
1674 > [format]
1674 > [format]
1675 > revlog-compression=zstd
1675 > revlog-compression=zstd
1676 > EOF
1676 > EOF
1677 $ hg debugupgraderepo --run --no-backup --quiet
1677 $ hg debugupgraderepo --run --no-backup --quiet
1678 upgrade will perform the following actions:
1678 upgrade will perform the following actions:
1679
1679
1680 requirements
1680 requirements
1681 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1681 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1682 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1682 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1683 added: revlog-compression-zstd
1683 added: revlog-compression-zstd
1684
1684
1685 processed revlogs:
1685 processed revlogs:
1686 - all-filelogs
1686 - all-filelogs
1687 - changelog
1687 - changelog
1688 - manifest
1688 - manifest
1689
1689
1690 $ hg debugformat -v
1690 $ hg debugformat -v
1691 format-variant repo config default
1691 format-variant repo config default
1692 fncache: yes yes yes
1692 fncache: yes yes yes
1693 dirstate-v2: no no no
1693 dirstate-v2: no no no
1694 tracked-hint: no no no
1694 tracked-hint: no no no
1695 dotencode: yes yes yes
1695 dotencode: yes yes yes
1696 generaldelta: yes yes yes
1696 generaldelta: yes yes yes
1697 share-safe: yes yes yes
1697 share-safe: yes yes yes
1698 sparserevlog: yes yes yes
1698 sparserevlog: yes yes yes
1699 persistent-nodemap: no no no (no-rust !)
1699 persistent-nodemap: no no no (no-rust !)
1700 persistent-nodemap: yes yes no (rust !)
1700 persistent-nodemap: yes yes no (rust !)
1701 copies-sdc: no no no
1701 copies-sdc: no no no
1702 revlog-v2: no no no
1702 revlog-v2: no no no
1703 changelog-v2: no no no
1703 changelog-v2: no no no
1704 plain-cl-delta: yes yes yes
1704 plain-cl-delta: yes yes yes
1705 compression: zlib zlib zlib (no-zstd !)
1705 compression: zlib zlib zlib (no-zstd !)
1706 compression: zstd zstd zstd (zstd !)
1706 compression: zstd zstd zstd (zstd !)
1707 compression-level: default default default
1707 compression-level: default default default
1708 $ hg debugrequires
1708 $ hg debugrequires
1709 dotencode
1709 dotencode
1710 fncache
1710 fncache
1711 generaldelta
1711 generaldelta
1712 persistent-nodemap (rust !)
1712 persistent-nodemap (rust !)
1713 revlog-compression-zstd
1713 revlog-compression-zstd
1714 revlogv1
1714 revlogv1
1715 share-safe
1715 share-safe
1716 sparserevlog
1716 sparserevlog
1717 store
1717 store
1718
1718
1719 #endif
1719 #endif
1720
1720
1721 Check upgrading to a revlog format supporting sidedata
1721 Check upgrading to a revlog format supporting sidedata
1722 ------------------------------------------------------
1722 ------------------------------------------------------
1723
1723
1724 upgrade
1724 upgrade
1725
1725
1726 $ hg debugsidedata -c 0
1726 $ hg debugsidedata -c 0
1727 $ hg --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" --quiet
1727 $ hg --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" --quiet
1728 upgrade will perform the following actions:
1728 upgrade will perform the following actions:
1729
1729
1730 requirements
1730 requirements
1731 preserved: dotencode, fncache, generaldelta, share-safe, store (no-zstd !)
1731 preserved: dotencode, fncache, generaldelta, share-safe, store (no-zstd !)
1732 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1732 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1733 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1733 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1734 removed: revlogv1
1734 removed: revlogv1
1735 added: exp-revlogv2.2 (zstd !)
1735 added: exp-revlogv2.2 (zstd !)
1736 added: exp-revlogv2.2, sparserevlog (no-zstd !)
1736 added: exp-revlogv2.2, sparserevlog (no-zstd !)
1737
1737
1738 processed revlogs:
1738 processed revlogs:
1739 - all-filelogs
1739 - all-filelogs
1740 - changelog
1740 - changelog
1741 - manifest
1741 - manifest
1742
1742
1743 $ hg debugformat -v
1743 $ hg debugformat -v
1744 format-variant repo config default
1744 format-variant repo config default
1745 fncache: yes yes yes
1745 fncache: yes yes yes
1746 dirstate-v2: no no no
1746 dirstate-v2: no no no
1747 tracked-hint: no no no
1747 tracked-hint: no no no
1748 dotencode: yes yes yes
1748 dotencode: yes yes yes
1749 generaldelta: yes yes yes
1749 generaldelta: yes yes yes
1750 share-safe: yes yes yes
1750 share-safe: yes yes yes
1751 sparserevlog: yes yes yes
1751 sparserevlog: yes yes yes
1752 persistent-nodemap: no no no (no-rust !)
1752 persistent-nodemap: no no no (no-rust !)
1753 persistent-nodemap: yes yes no (rust !)
1753 persistent-nodemap: yes yes no (rust !)
1754 copies-sdc: no no no
1754 copies-sdc: no no no
1755 revlog-v2: yes no no
1755 revlog-v2: yes no no
1756 changelog-v2: no no no
1756 changelog-v2: no no no
1757 plain-cl-delta: yes yes yes
1757 plain-cl-delta: yes yes yes
1758 compression: zlib zlib zlib (no-zstd !)
1758 compression: zlib zlib zlib (no-zstd !)
1759 compression: zstd zstd zstd (zstd !)
1759 compression: zstd zstd zstd (zstd !)
1760 compression-level: default default default
1760 compression-level: default default default
1761 $ hg debugrequires
1761 $ hg debugrequires
1762 dotencode
1762 dotencode
1763 exp-revlogv2.2
1763 exp-revlogv2.2
1764 fncache
1764 fncache
1765 generaldelta
1765 generaldelta
1766 persistent-nodemap (rust !)
1766 persistent-nodemap (rust !)
1767 revlog-compression-zstd (zstd !)
1767 revlog-compression-zstd (zstd !)
1768 share-safe
1768 share-safe
1769 sparserevlog
1769 sparserevlog
1770 store
1770 store
1771 $ hg debugsidedata -c 0
1771 $ hg debugsidedata -c 0
1772 2 sidedata entries
1772 2 sidedata entries
1773 entry-0001 size 4
1773 entry-0001 size 4
1774 entry-0002 size 32
1774 entry-0002 size 32
1775
1775
1776 downgrade
1776 downgrade
1777
1777
1778 $ hg debugupgraderepo --config experimental.revlogv2=no --run --no-backup --quiet
1778 $ hg debugupgraderepo --config experimental.revlogv2=no --run --no-backup --quiet
1779 upgrade will perform the following actions:
1779 upgrade will perform the following actions:
1780
1780
1781 requirements
1781 requirements
1782 preserved: dotencode, fncache, generaldelta, share-safe, sparserevlog, store (no-zstd !)
1782 preserved: dotencode, fncache, generaldelta, share-safe, sparserevlog, store (no-zstd !)
1783 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1783 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1784 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1784 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1785 removed: exp-revlogv2.2
1785 removed: exp-revlogv2.2
1786 added: revlogv1
1786 added: revlogv1
1787
1787
1788 processed revlogs:
1788 processed revlogs:
1789 - all-filelogs
1789 - all-filelogs
1790 - changelog
1790 - changelog
1791 - manifest
1791 - manifest
1792
1792
1793 $ hg debugformat -v
1793 $ hg debugformat -v
1794 format-variant repo config default
1794 format-variant repo config default
1795 fncache: yes yes yes
1795 fncache: yes yes yes
1796 dirstate-v2: no no no
1796 dirstate-v2: no no no
1797 tracked-hint: no no no
1797 tracked-hint: no no no
1798 dotencode: yes yes yes
1798 dotencode: yes yes yes
1799 generaldelta: yes yes yes
1799 generaldelta: yes yes yes
1800 share-safe: yes yes yes
1800 share-safe: yes yes yes
1801 sparserevlog: yes yes yes
1801 sparserevlog: yes yes yes
1802 persistent-nodemap: no no no (no-rust !)
1802 persistent-nodemap: no no no (no-rust !)
1803 persistent-nodemap: yes yes no (rust !)
1803 persistent-nodemap: yes yes no (rust !)
1804 copies-sdc: no no no
1804 copies-sdc: no no no
1805 revlog-v2: no no no
1805 revlog-v2: no no no
1806 changelog-v2: no no no
1806 changelog-v2: no no no
1807 plain-cl-delta: yes yes yes
1807 plain-cl-delta: yes yes yes
1808 compression: zlib zlib zlib (no-zstd !)
1808 compression: zlib zlib zlib (no-zstd !)
1809 compression: zstd zstd zstd (zstd !)
1809 compression: zstd zstd zstd (zstd !)
1810 compression-level: default default default
1810 compression-level: default default default
1811 $ hg debugrequires
1811 $ hg debugrequires
1812 dotencode
1812 dotencode
1813 fncache
1813 fncache
1814 generaldelta
1814 generaldelta
1815 persistent-nodemap (rust !)
1815 persistent-nodemap (rust !)
1816 revlog-compression-zstd (zstd !)
1816 revlog-compression-zstd (zstd !)
1817 revlogv1
1817 revlogv1
1818 share-safe
1818 share-safe
1819 sparserevlog
1819 sparserevlog
1820 store
1820 store
1821 $ hg debugsidedata -c 0
1821 $ hg debugsidedata -c 0
1822
1822
1823 upgrade from hgrc
1823 upgrade from hgrc
1824
1824
1825 $ cat >> .hg/hgrc << EOF
1825 $ cat >> .hg/hgrc << EOF
1826 > [experimental]
1826 > [experimental]
1827 > revlogv2=enable-unstable-format-and-corrupt-my-data
1827 > revlogv2=enable-unstable-format-and-corrupt-my-data
1828 > EOF
1828 > EOF
1829 $ hg debugupgraderepo --run --no-backup --quiet
1829 $ hg debugupgraderepo --run --no-backup --quiet
1830 upgrade will perform the following actions:
1830 upgrade will perform the following actions:
1831
1831
1832 requirements
1832 requirements
1833 preserved: dotencode, fncache, generaldelta, share-safe, sparserevlog, store (no-zstd !)
1833 preserved: dotencode, fncache, generaldelta, share-safe, sparserevlog, store (no-zstd !)
1834 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1834 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1835 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1835 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1836 removed: revlogv1
1836 removed: revlogv1
1837 added: exp-revlogv2.2
1837 added: exp-revlogv2.2
1838
1838
1839 processed revlogs:
1839 processed revlogs:
1840 - all-filelogs
1840 - all-filelogs
1841 - changelog
1841 - changelog
1842 - manifest
1842 - manifest
1843
1843
1844 $ hg debugformat -v
1844 $ hg debugformat -v
1845 format-variant repo config default
1845 format-variant repo config default
1846 fncache: yes yes yes
1846 fncache: yes yes yes
1847 dirstate-v2: no no no
1847 dirstate-v2: no no no
1848 tracked-hint: no no no
1848 tracked-hint: no no no
1849 dotencode: yes yes yes
1849 dotencode: yes yes yes
1850 generaldelta: yes yes yes
1850 generaldelta: yes yes yes
1851 share-safe: yes yes yes
1851 share-safe: yes yes yes
1852 sparserevlog: yes yes yes
1852 sparserevlog: yes yes yes
1853 persistent-nodemap: no no no (no-rust !)
1853 persistent-nodemap: no no no (no-rust !)
1854 persistent-nodemap: yes yes no (rust !)
1854 persistent-nodemap: yes yes no (rust !)
1855 copies-sdc: no no no
1855 copies-sdc: no no no
1856 revlog-v2: yes yes no
1856 revlog-v2: yes yes no
1857 changelog-v2: no no no
1857 changelog-v2: no no no
1858 plain-cl-delta: yes yes yes
1858 plain-cl-delta: yes yes yes
1859 compression: zlib zlib zlib (no-zstd !)
1859 compression: zlib zlib zlib (no-zstd !)
1860 compression: zstd zstd zstd (zstd !)
1860 compression: zstd zstd zstd (zstd !)
1861 compression-level: default default default
1861 compression-level: default default default
1862 $ hg debugrequires
1862 $ hg debugrequires
1863 dotencode
1863 dotencode
1864 exp-revlogv2.2
1864 exp-revlogv2.2
1865 fncache
1865 fncache
1866 generaldelta
1866 generaldelta
1867 persistent-nodemap (rust !)
1867 persistent-nodemap (rust !)
1868 revlog-compression-zstd (zstd !)
1868 revlog-compression-zstd (zstd !)
1869 share-safe
1869 share-safe
1870 sparserevlog
1870 sparserevlog
1871 store
1871 store
1872 $ hg debugsidedata -c 0
1872 $ hg debugsidedata -c 0
1873
1873
1874 Demonstrate that an upgrade with nothing to perform will still run all the way through
1874 Demonstrate that an upgrade with nothing to perform will still run all the way through
1875
1875
1876 $ hg debugupgraderepo --run
1876 $ hg debugupgraderepo --run
1877 nothing to do
1877 nothing to do
1878
1878
1879 #if no-rust
1879 #if no-rust
1880
1880
1881 $ cat << EOF >> $HGRCPATH
1881 $ cat << EOF >> $HGRCPATH
1882 > [storage]
1882 > [storage]
1883 > dirstate-v2.slow-path = allow
1883 > dirstate-v2.slow-path = allow
1884 > EOF
1884 > EOF
1885
1885
1886 #endif
1886 #endif
1887
1887
1888 Upgrade to dirstate-v2
1888 Upgrade to dirstate-v2
1889
1889
1890 $ hg debugformat -v --config format.use-dirstate-v2=1 | grep dirstate-v2
1890 $ hg debugformat -v --config format.use-dirstate-v2=1 | grep dirstate-v2
1891 dirstate-v2: no yes no
1891 dirstate-v2: no yes no
1892 $ hg debugupgraderepo --config format.use-dirstate-v2=1 --run
1892 $ hg debugupgraderepo --config format.use-dirstate-v2=1 --run
1893 upgrade will perform the following actions:
1893 upgrade will perform the following actions:
1894
1894
1895 requirements
1895 requirements
1896 preserved: * (glob)
1896 preserved: * (glob)
1897 added: dirstate-v2
1897 added: dirstate-v2
1898
1898
1899 dirstate-v2
1899 dirstate-v2
1900 "hg status" will be faster
1900 "hg status" will be faster
1901
1901
1902 no revlogs to process
1902 no revlogs to process
1903
1903
1904 beginning upgrade...
1904 beginning upgrade...
1905 repository locked and read-only
1905 repository locked and read-only
1906 creating temporary repository to stage upgraded data: $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1906 creating temporary repository to stage upgraded data: $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1907 (it is safe to interrupt this process any time before data migration completes)
1907 (it is safe to interrupt this process any time before data migration completes)
1908 upgrading to dirstate-v2 from v1
1908 upgrading to dirstate-v2 from v1
1909 replaced files will be backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1909 replaced files will be backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1910 removing temporary repository $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1910 removing temporary repository $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1911 $ ls .hg/upgradebackup.*/dirstate
1911 $ ls .hg/upgradebackup.*/dirstate
1912 .hg/upgradebackup.*/dirstate (glob)
1912 .hg/upgradebackup.*/dirstate (glob)
1913 $ hg debugformat -v | grep dirstate-v2
1913 $ hg debugformat -v | grep dirstate-v2
1914 dirstate-v2: yes no no
1914 dirstate-v2: yes no no
1915 $ hg status
1915 $ hg status
1916 $ dd bs=12 count=1 if=.hg/dirstate 2> /dev/null
1916 $ dd bs=12 count=1 if=.hg/dirstate 2> /dev/null
1917 dirstate-v2
1917 dirstate-v2
1918
1918
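The `dd bs=12 count=1 if=.hg/dirstate` probe works because a dirstate-v2 file begins with a human-readable marker, whereas a v1 dirstate starts with the two binary parent nodeids. The same check in a few lines of Python; `dirstate_is_v2` is a hypothetical helper for illustration only and does not parse anything past those first bytes:

    # Same idea as the dd probe above: read the first 12 bytes of .hg/dirstate
    # and look for the dirstate-v2 marker.
    import os.path

    def dirstate_is_v2(repo_root):
        with open(os.path.join(repo_root, '.hg', 'dirstate'), 'rb') as f:
            return f.read(12).startswith(b'dirstate-v2')

    print(dirstate_is_v2('.'))   # True right after the upgrade above
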
1919 Downgrade from dirstate-v2
1919 Downgrade from dirstate-v2
1920
1920
1921 $ hg debugupgraderepo --run
1921 $ hg debugupgraderepo --run
1922 upgrade will perform the following actions:
1922 upgrade will perform the following actions:
1923
1923
1924 requirements
1924 requirements
1925 preserved: * (glob)
1925 preserved: * (glob)
1926 removed: dirstate-v2
1926 removed: dirstate-v2
1927
1927
1928 no revlogs to process
1928 no revlogs to process
1929
1929
1930 beginning upgrade...
1930 beginning upgrade...
1931 repository locked and read-only
1931 repository locked and read-only
1932 creating temporary repository to stage upgraded data: $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1932 creating temporary repository to stage upgraded data: $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1933 (it is safe to interrupt this process any time before data migration completes)
1933 (it is safe to interrupt this process any time before data migration completes)
1934 downgrading from dirstate-v2 to v1
1934 downgrading from dirstate-v2 to v1
1935 replaced files will be backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1935 replaced files will be backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1936 removing temporary repository $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1936 removing temporary repository $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1937 $ hg debugformat -v | grep dirstate-v2
1937 $ hg debugformat -v | grep dirstate-v2
1938 dirstate-v2: no no no
1938 dirstate-v2: no no no
1939 $ hg status
1939 $ hg status
1940
1940
1941 $ cd ..
1941 $ cd ..
1942
1942
1943 dirstate-v2: upgrade and downgrade from an empty repository:
1943 dirstate-v2: upgrade and downgrade from an empty repository:
1944 ------------------------------------------------------------
1944 ------------------------------------------------------------
1945
1945
1946 $ hg init --config format.use-dirstate-v2=no dirstate-v2-empty
1946 $ hg init --config format.use-dirstate-v2=no dirstate-v2-empty
1947 $ cd dirstate-v2-empty
1947 $ cd dirstate-v2-empty
1948 $ hg debugformat | grep dirstate-v2
1948 $ hg debugformat | grep dirstate-v2
1949 dirstate-v2: no
1949 dirstate-v2: no
1950
1950
1951 upgrade
1951 upgrade
1952
1952
1953 $ hg debugupgraderepo --run --config format.use-dirstate-v2=yes
1953 $ hg debugupgraderepo --run --config format.use-dirstate-v2=yes
1954 upgrade will perform the following actions:
1954 upgrade will perform the following actions:
1955
1955
1956 requirements
1956 requirements
1957 preserved: * (glob)
1957 preserved: * (glob)
1958 added: dirstate-v2
1958 added: dirstate-v2
1959
1959
1960 dirstate-v2
1960 dirstate-v2
1961 "hg status" will be faster
1961 "hg status" will be faster
1962
1962
1963 no revlogs to process
1963 no revlogs to process
1964
1964
1965 beginning upgrade...
1965 beginning upgrade...
1966 repository locked and read-only
1966 repository locked and read-only
1967 creating temporary repository to stage upgraded data: $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1967 creating temporary repository to stage upgraded data: $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1968 (it is safe to interrupt this process any time before data migration completes)
1968 (it is safe to interrupt this process any time before data migration completes)
1969 upgrading to dirstate-v2 from v1
1969 upgrading to dirstate-v2 from v1
1970 replaced files will be backed up at $TESTTMP/dirstate-v2-empty/.hg/upgradebackup.* (glob)
1970 replaced files will be backed up at $TESTTMP/dirstate-v2-empty/.hg/upgradebackup.* (glob)
1971 removing temporary repository $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1971 removing temporary repository $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1972 $ hg debugformat | grep dirstate-v2
1972 $ hg debugformat | grep dirstate-v2
1973 dirstate-v2: yes
1973 dirstate-v2: yes
1974
1974
1975 downgrade
1975 downgrade
1976
1976
1977 $ hg debugupgraderepo --run --config format.use-dirstate-v2=no
1977 $ hg debugupgraderepo --run --config format.use-dirstate-v2=no
1978 upgrade will perform the following actions:
1978 upgrade will perform the following actions:
1979
1979
1980 requirements
1980 requirements
1981 preserved: * (glob)
1981 preserved: * (glob)
1982 removed: dirstate-v2
1982 removed: dirstate-v2
1983
1983
1984 no revlogs to process
1984 no revlogs to process
1985
1985
1986 beginning upgrade...
1986 beginning upgrade...
1987 repository locked and read-only
1987 repository locked and read-only
1988 creating temporary repository to stage upgraded data: $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1988 creating temporary repository to stage upgraded data: $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1989 (it is safe to interrupt this process any time before data migration completes)
1989 (it is safe to interrupt this process any time before data migration completes)
1990 downgrading from dirstate-v2 to v1
1990 downgrading from dirstate-v2 to v1
1991 replaced files will be backed up at $TESTTMP/dirstate-v2-empty/.hg/upgradebackup.* (glob)
1991 replaced files will be backed up at $TESTTMP/dirstate-v2-empty/.hg/upgradebackup.* (glob)
1992 removing temporary repository $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1992 removing temporary repository $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1993 $ hg debugformat | grep dirstate-v2
1993 $ hg debugformat | grep dirstate-v2
1994 dirstate-v2: no
1994 dirstate-v2: no
1995
1995
1996 $ cd ..
1996 $ cd ..
1997
1997
1998 Test automatic upgrade/downgrade
1998 Test automatic upgrade/downgrade
1999 ================================
1999 ================================
2000
2000
2001
2001
2002 For dirstate v2
2002 For dirstate v2
2003 ---------------
2003 ---------------
2004
2004
2005 create an initial repository
2005 create an initial repository
2006
2006
2007 $ hg init auto-upgrade \
2007 $ hg init auto-upgrade \
2008 > --config format.use-dirstate-v2=no \
2008 > --config format.use-dirstate-v2=no \
2009 > --config format.use-dirstate-tracked-hint=yes \
2009 > --config format.use-dirstate-tracked-hint=yes \
2010 > --config format.use-share-safe=no
2010 > --config format.use-share-safe=no
2011 $ hg debugbuilddag -R auto-upgrade --new-file .+5
2011 $ hg debugbuilddag -R auto-upgrade --new-file .+5
2012 $ hg -R auto-upgrade update
2012 $ hg -R auto-upgrade update
2013 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
2013 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
2014 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2014 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2015 dirstate-v2: no
2015 dirstate-v2: no
2016
2016
2017 upgrade it to dirstate-v2 automatically
2017 upgrade it to dirstate-v2 automatically
2018
2018
2019 $ hg status -R auto-upgrade \
2019 $ hg status -R auto-upgrade \
2020 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2020 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2021 > --config format.use-dirstate-v2=yes
2021 > --config format.use-dirstate-v2=yes
2022 automatically upgrading repository to the `dirstate-v2` feature
2022 automatically upgrading repository to the `dirstate-v2` feature
2023 (see `hg help config.format.use-dirstate-v2` for details)
2023 (see `hg help config.format.use-dirstate-v2` for details)
2024 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2024 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2025 dirstate-v2: yes
2025 dirstate-v2: yes
2026
2026
2027 downgrade it from dirstate-v2 automatically
2027 downgrade it from dirstate-v2 automatically
2028
2028
2029 $ hg status -R auto-upgrade \
2029 $ hg status -R auto-upgrade \
2030 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2030 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2031 > --config format.use-dirstate-v2=no
2031 > --config format.use-dirstate-v2=no
2032 automatically downgrading repository from the `dirstate-v2` feature
2032 automatically downgrading repository from the `dirstate-v2` feature
2033 (see `hg help config.format.use-dirstate-v2` for details)
2033 (see `hg help config.format.use-dirstate-v2` for details)
2034 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2034 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2035 dirstate-v2: no
2035 dirstate-v2: no
2036
2036
2037
2037
2038 For multiple changes at the same time
2038 For multiple changes at the same time
2039 -------------------------------------
2039 -------------------------------------
2040
2040
2041 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2041 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2042 dirstate-v2: no
2042 dirstate-v2: no
2043 tracked-hint: yes
2043 tracked-hint: yes
2044 share-safe: no
2044 share-safe: no
2045
2045
2046 $ hg status -R auto-upgrade \
2046 $ hg status -R auto-upgrade \
2047 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2047 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2048 > --config format.use-dirstate-v2=yes \
2048 > --config format.use-dirstate-v2=yes \
2049 > --config format.use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories=yes \
2049 > --config format.use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories=yes \
2050 > --config format.use-dirstate-tracked-hint=no\
2050 > --config format.use-dirstate-tracked-hint=no\
2051 > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories=yes \
2051 > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories=yes \
2052 > --config format.use-share-safe=yes
2052 > --config format.use-share-safe=yes
2053 automatically upgrading repository to the `dirstate-v2` feature
2053 automatically upgrading repository to the `dirstate-v2` feature
2054 (see `hg help config.format.use-dirstate-v2` for details)
2054 (see `hg help config.format.use-dirstate-v2` for details)
2055 automatically upgrading repository to the `share-safe` feature
2055 automatically upgrading repository to the `share-safe` feature
2056 (see `hg help config.format.use-share-safe` for details)
2056 (see `hg help config.format.use-share-safe` for details)
2057 automatically downgrading repository from the `tracked-hint` feature
2057 automatically downgrading repository from the `tracked-hint` feature
2058 (see `hg help config.format.use-dirstate-tracked-hint` for details)
2058 (see `hg help config.format.use-dirstate-tracked-hint` for details)
2059 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2059 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2060 dirstate-v2: yes
2060 dirstate-v2: yes
2061 tracked-hint: no
2061 tracked-hint: no
2062 share-safe: yes
2062 share-safe: yes
2063
2063
2064 Attempting Auto-upgrade on a read-only repository
2064 Attempting Auto-upgrade on a read-only repository
2065 -------------------------------------------------
2065 -------------------------------------------------
2066
2066
2067 $ chmod -R a-w auto-upgrade
2067 $ chmod -R a-w auto-upgrade
2068
2068
2069 $ hg status -R auto-upgrade \
2069 $ hg status -R auto-upgrade \
2070 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2070 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2071 > --config format.use-dirstate-v2=no
2071 > --config format.use-dirstate-v2=no
2072 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2072 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2073 dirstate-v2: yes
2073 dirstate-v2: yes
2074
2074
2075 $ chmod -R u+w auto-upgrade
2075 $ chmod -R u+w auto-upgrade
2076
2076
2077 Attempting Auto-upgrade on a locked repository
2077 Attempting Auto-upgrade on a locked repository
2078 ----------------------------------------------
2078 ----------------------------------------------
2079
2079
2080 $ hg -R auto-upgrade debuglock --set-lock --quiet &
2080 $ hg -R auto-upgrade debuglock --set-lock --quiet &
2081 $ echo $! >> $DAEMON_PIDS
2081 $ echo $! >> $DAEMON_PIDS
2082 $ $RUNTESTDIR/testlib/wait-on-file 10 auto-upgrade/.hg/store/lock
2082 $ $RUNTESTDIR/testlib/wait-on-file 10 auto-upgrade/.hg/store/lock
2083 $ hg status -R auto-upgrade \
2083 $ hg status -R auto-upgrade \
2084 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2084 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2085 > --config format.use-dirstate-v2=no
2085 > --config format.use-dirstate-v2=no
2086 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2086 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2087 dirstate-v2: yes
2087 dirstate-v2: yes
2088
2088
2089 $ killdaemons.py
2089 $ killdaemons.py