##// END OF EJS Templates
upgrade: drop support for old style optimization names...
Pulkit Goyal -
r46825:083438d6 default
parent child Browse files
Show More
@@ -1,4661 +1,4661 b''
1 # debugcommands.py - command processing for debug* commands
1 # debugcommands.py - command processing for debug* commands
2 #
2 #
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import codecs
10 import codecs
11 import collections
11 import collections
12 import difflib
12 import difflib
13 import errno
13 import errno
14 import glob
14 import glob
15 import operator
15 import operator
16 import os
16 import os
17 import platform
17 import platform
18 import random
18 import random
19 import re
19 import re
20 import socket
20 import socket
21 import ssl
21 import ssl
22 import stat
22 import stat
23 import string
23 import string
24 import subprocess
24 import subprocess
25 import sys
25 import sys
26 import time
26 import time
27
27
28 from .i18n import _
28 from .i18n import _
29 from .node import (
29 from .node import (
30 bin,
30 bin,
31 hex,
31 hex,
32 nullid,
32 nullid,
33 nullrev,
33 nullrev,
34 short,
34 short,
35 )
35 )
36 from .pycompat import (
36 from .pycompat import (
37 getattr,
37 getattr,
38 open,
38 open,
39 )
39 )
40 from . import (
40 from . import (
41 bundle2,
41 bundle2,
42 bundlerepo,
42 bundlerepo,
43 changegroup,
43 changegroup,
44 cmdutil,
44 cmdutil,
45 color,
45 color,
46 context,
46 context,
47 copies,
47 copies,
48 dagparser,
48 dagparser,
49 encoding,
49 encoding,
50 error,
50 error,
51 exchange,
51 exchange,
52 extensions,
52 extensions,
53 filemerge,
53 filemerge,
54 filesetlang,
54 filesetlang,
55 formatter,
55 formatter,
56 hg,
56 hg,
57 httppeer,
57 httppeer,
58 localrepo,
58 localrepo,
59 lock as lockmod,
59 lock as lockmod,
60 logcmdutil,
60 logcmdutil,
61 mergestate as mergestatemod,
61 mergestate as mergestatemod,
62 metadata,
62 metadata,
63 obsolete,
63 obsolete,
64 obsutil,
64 obsutil,
65 pathutil,
65 pathutil,
66 phases,
66 phases,
67 policy,
67 policy,
68 pvec,
68 pvec,
69 pycompat,
69 pycompat,
70 registrar,
70 registrar,
71 repair,
71 repair,
72 revlog,
72 revlog,
73 revset,
73 revset,
74 revsetlang,
74 revsetlang,
75 scmutil,
75 scmutil,
76 setdiscovery,
76 setdiscovery,
77 simplemerge,
77 simplemerge,
78 sshpeer,
78 sshpeer,
79 sslutil,
79 sslutil,
80 streamclone,
80 streamclone,
81 strip,
81 strip,
82 tags as tagsmod,
82 tags as tagsmod,
83 templater,
83 templater,
84 treediscovery,
84 treediscovery,
85 upgrade,
85 upgrade,
86 url as urlmod,
86 url as urlmod,
87 util,
87 util,
88 vfs as vfsmod,
88 vfs as vfsmod,
89 wireprotoframing,
89 wireprotoframing,
90 wireprotoserver,
90 wireprotoserver,
91 wireprotov2peer,
91 wireprotov2peer,
92 )
92 )
93 from .utils import (
93 from .utils import (
94 cborutil,
94 cborutil,
95 compression,
95 compression,
96 dateutil,
96 dateutil,
97 procutil,
97 procutil,
98 stringutil,
98 stringutil,
99 )
99 )
100
100
101 from .revlogutils import (
101 from .revlogutils import (
102 deltas as deltautil,
102 deltas as deltautil,
103 nodemap,
103 nodemap,
104 sidedata,
104 sidedata,
105 )
105 )
106
106
# Convenience alias: debug commands release locks through lockmod.
release = lockmod.release

# Command registration table for all debug* commands.  It is pre-seeded with
# the commands exported by the strip module so those remain available, then
# a registrar bound to it is used by the @command decorators below.
table = {}
table.update(strip.command._table)
command = registrar.command(table)
112
112
113
113
@command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    nargs = len(args)
    if nargs == 3:
        # Explicit index file given: open the revlog directly, without a repo.
        index, rev1, rev2 = args
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        resolve = rlog.lookup
    elif nargs == 2:
        # No index argument: fall back to the current repository's changelog.
        if not repo:
            raise error.Abort(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        rev1, rev2 = args
        rlog = repo.changelog
        resolve = repo.lookup
    else:
        raise error.Abort(_(b'either two or three arguments required'))
    anc = rlog.ancestor(resolve(rev1), resolve(rev2))
    ui.write(b'%d:%s\n' % (rlog.rev(anc), hex(anc)))
133
133
134
134
@command(b'debugantivirusrunning', [])
def debugantivirusrunning(ui, repo):
    """attempt to trigger an antivirus scanner to see if one is active"""
    # Paths in this code base are byte strings; the previous code passed a
    # native str literal to the vfs layer, which breaks on Python 3.  Use a
    # single bytes constant for both the create and the unlink below.
    fname = b'eicar-test-file.com'
    with repo.cachevfs.open(fname, b'wb') as f:
        f.write(
            util.b85decode(
                # This is a base85-armored version of the EICAR test file. See
                # https://en.wikipedia.org/wiki/EICAR_test_file for details.
                b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
                b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
            )
        )
    # Give an AV engine time to scan the file.
    time.sleep(2)
    util.unlink(repo.cachevfs.join(fname))
150
150
151
151
@command(b'debugapplystreamclonebundle', [], b'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    # Open the bundle file, parse its header, then replay it into the repo.
    fh = hg.openpath(ui, fname)
    bundle = exchange.readbundle(ui, fh, fname)
    bundle.apply(repo)
158
158
159
159
@command(
    b'debugbuilddag',
    [
        (
            b'm',
            b'mergeable-file',
            None,
            _(b'add single file mergeable changes'),
        ),
        (
            b'o',
            b'overwritten-file',
            None,
            _(b'add single file all revs overwrite'),
        ),
        (b'n', b'new-file', None, _(b'add new file at each rev')),
    ],
    _(b'[OPTION]... [TEXT]'),
)
def debugbuilddag(
    ui,
    repo,
    text=None,
    mergeable_file=False,
    overwritten_file=False,
    new_file=False,
):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_(b"reading DAG from stdin\n"))
        text = ui.fin.read()

    # Refuse to run on a non-empty repo: node ids are assigned positionally.
    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_(b'repository is not empty'))

    # determine number of revs in DAG (first pass, used for progress/sizing)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == b'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [
            b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
        ]
        initialmergedlines.append(b"")

    tags = []
    progress = ui.makeprogress(
        _(b'building'), unit=_(b'revisions'), total=total
    )
    with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
        at = -1  # rev number of the most recently committed node
        atbranch = b'default'  # branch applied to subsequently created nodes
        nodeids = []  # node id per rev, so backrefs can be resolved by index
        id = 0
        progress.update(id)
        # Second pass: actually create a commit per 'n' element.
        for type, data in dagparser.parsedag(text):
            if type == b'n':
                ui.note((b'node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = b"mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # Merge node: three-way merge the shared file's
                        # contents from both parents and their ancestor.
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [
                            x[fn].data() for x in (pa, p1, p2)
                        ]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append(b"")
                    elif at > 0:
                        ml = p1[fn].data().split(b"\n")
                    else:
                        ml = initialmergedlines
                    # Tag this rev's dedicated line so every rev changes it.
                    ml[id * linesperrev] += b" r%i" % id
                    mergedtext = b"\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = b"of"
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id

                if new_file:
                    fn = b"nf%i" % id
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id
                    if len(ps) > 1:
                        # On merges, carry over the per-rev files from p2 so
                        # they are not dropped from the merged manifest.
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith(b"nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    # memctx callback: serve the content computed above.
                    if path in filecontent:
                        return context.memfilectx(
                            repo, cx, path, filecontent[path]
                        )
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(
                    repo,
                    pars,
                    b"r%i" % id,
                    files,
                    fctxfn,
                    date=(id, 0),
                    user=b"debugbuilddag",
                    extra={b'branch': atbranch},
                )
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == b'l':
                # ':tag' element: remember a local tag for the given rev.
                id, name = data
                ui.note((b'tag %s\n' % name))
                tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == b'a':
                # '@branch' element: switch branch for subsequent nodes.
                ui.note((b'branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write(b"localtags", b"".join(tags))
335
335
336
336
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """dump the contents of the changegroup unbundler ``gen``

    When ``all`` is set, every delta chunk of the changelog, manifest and
    filelog sections is printed; otherwise only changelog node ids are
    listed.  ``indent`` prefixes every output line (used when this output
    is nested inside bundle2 part output).
    """
    indent_string = b' ' * indent
    if all:
        ui.writenoi18n(
            b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
            % indent_string
        )

        def showchunks(named):
            # Print one section header, then one line per delta chunk.
            ui.write(b"\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write(
                    b"%s%s %s %s %s %s %d\n"
                    % (
                        indent_string,
                        hex(node),
                        hex(p1),
                        hex(p2),
                        hex(cs),
                        hex(deltabase),
                        len(delta),
                    )
                )

        gen.changelogheader()
        showchunks(b"changelog")
        gen.manifestheader()
        showchunks(b"manifest")
        # Filelog sections repeat until an empty header ({}) ends the stream.
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata[b'filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_(b'use debugbundle2 for this file'))
        gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write(b"%s%s\n" % (indent_string, hex(node)))
376
376
377
377
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    pad = b' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # The payload advertises a marker format we cannot decode: report
        # the version and payload size instead of the markers themselves.
        ui.write(
            b"%sunsupported version: %s (%d bytes)\n"
            % (pad, exc.version, len(data))
        )
        return
    ui.write(b"%sversion: %d (%d bytes)\n" % (pad, version, len(data)))
    fm = ui.formatter(b'debugobsolete', opts)
    for rawmarker in sorted(markers):
        fm.startitem()
        fm.plain(pad)
        cmdutil.showmarker(fm, obsutil.marker(None, rawmarker))
    fm.end()
400
400
401
401
def _debugphaseheads(ui, data, indent=0):
    """display version and markers contained in 'data'"""
    pad = b' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        # One line per head, grouped by phase, each prefixed with the indent.
        phasename = phases.phasenames[phase]
        for head in headsbyphase[phase]:
            ui.write(pad)
            ui.write(b'%s %s\n' % (hex(head), phasename))
410
410
411
411
def _quasirepr(thing):
    # Render mapping types with deterministically sorted keys so output is
    # stable across runs; anything else falls back to its repr().
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        pairs = (b'%s: %s' % (key, thing[key]) for key in sorted(thing))
        return b'{%s}' % b', '.join(pairs)
    return pycompat.bytestr(repr(thing))
418
418
419
419
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_(b'not a bundle2 file'))
    ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
    # Optional filter: only show parts whose type was requested via
    # --part-type; an empty list means show everything.
    parttypes = opts.get('part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = b'%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        if part.type == b'changegroup':
            # Decode the embedded changegroup and recurse into its chunks.
            version = part.params.get(b'version', b'01')
            cg = changegroup.getunbundler(version, part, b'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == b'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == b'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)
442
442
443
443
@command(
    b'debugbundle',
    [
        (b'a', b'all', None, _(b'show all details')),
        (b'', b'part-type', [], _(b'show only the named part type')),
        (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
    ],
    _(b'FILE'),
    norepo=True,
)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as fh:
        # --spec short-circuits: just derive and print the bundlespec.
        if spec:
            spec = exchange.getbundlespec(ui, fh)
            ui.write(b'%s\n' % spec)
            return

        unbundler = exchange.readbundle(ui, fh, bundlepath)
        if isinstance(unbundler, bundle2.unbundle20):
            return _debugbundle2(ui, unbundler, all=all, **opts)
        _debugchangegroup(ui, unbundler, all=all, **opts)
466
466
467
467
@command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    # Plain wire-protocol capabilities first, sorted for stable output.
    ui.writenoi18n(b'Main capabilities:\n')
    for cap in sorted(peer.capabilities()):
        ui.write(b'  %s\n' % cap)
    # Then the decoded bundle2 capability map, if the peer advertises one.
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.writenoi18n(b'Bundle2 capabilities:\n')
        for key, values in sorted(pycompat.iteritems(b2caps)):
            ui.write(b'  %s\n' % key)
            for value in values:
                ui.write(b'    %s\n' % value)
484
484
485
485
@command(b'debugchangedfiles', [], b'REV')
def debugchangedfiles(ui, repo, rev):
    """list the stored files changes for a revision"""
    ctx = scmutil.revsingle(repo, rev, None)
    sd = repo.changelog.sidedata(ctx.rev())
    files_block = sd.get(sidedata.SD_FILES)
    if files_block is None:
        # No files sidedata stored for this revision: nothing to report.
        return
    files = metadata.decode_files_sidedata(sd)
    template = b"%-8s %2s: %s, %s;\n"
    for f in sorted(files.touched):
        # Classify the change; categories are checked from most to least
        # specific, falling back to the generic "touched".
        if f in files.added:
            action = b"added"
        elif f in files.removed:
            action = b"removed"
        elif f in files.merged:
            action = b"merged"
        elif f in files.salvaged:
            action = b"salvaged"
        else:
            action = b"touched"

        copy_parent = b""
        copy_source = b""
        if f in files.copied_from_p1:
            copy_parent, copy_source = b"p1", files.copied_from_p1[f]
        elif f in files.copied_from_p2:
            copy_parent, copy_source = b"p2", files.copied_from_p2[f]

        ui.write(template % (action, copy_parent, f, copy_source))
518
518
519
519
@command(b'debugcheckstate', [], b'')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = [0]

    def complain(msg, f, state):
        # Emit one inconsistency warning and remember that we saw a problem.
        ui.warn(msg % (f, state))
        errors[0] += 1

    # Cross-check every tracked file against the parent manifests.
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in b"nr" and f not in m1:
            complain(_(b"%s in state %s, but not in manifest1\n"), f, state)
        if state in b"a" and f in m1:
            complain(_(b"%s in state %s, but also in manifest1\n"), f, state)
        if state in b"m" and f not in m1 and f not in m2:
            complain(
                _(b"%s in state %s, but not in either manifest\n"), f, state
            )
    # And the reverse: every manifest entry must be tracked appropriately.
    for f in m1:
        state = repo.dirstate[f]
        if state not in b"nrm":
            complain(_(b"%s in manifest1, but listed as state %s"), f, state)
    if errors[0]:
        errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
548
548
549
549
@command(
    b'debugcolor',
    [(b'', b'style', None, _(b'show all configured styles'))],
    b'hg debugcolor',
)
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
    # --style lists configured style labels; the default lists raw colors.
    if opts.get('style'):
        return _debugdisplaystyle(ui)
    return _debugdisplaycolor(ui)
562
562
563
563
def _debugdisplaycolor(ui):
    # Work on a copy: the style table is replaced wholesale below so each
    # color name renders in its own color.
    ui = ui.copy()
    ui._styles.clear()
    ui._styles.update({effect: effect for effect in color._activeeffects(ui)})
    if ui._terminfoparams:
        for key, _value in ui.configitems(b'color'):
            if key.startswith(b'color.'):
                ui._styles[key] = key[6:]
            elif key.startswith(b'terminfo.'):
                ui._styles[key] = key[9:]
    ui.write(_(b'available colors:\n'))

    # sort label with a '_' after the other to group '_background' entry.
    def sortkey(item):
        return (b'_' in item[0], item[0], item[1])

    for colorname, label in sorted(ui._styles.items(), key=sortkey):
        ui.write(b'%s\n' % colorname, label=label)
580
580
581
581
def _debugdisplaystyle(ui):
    """print each configured style label followed by its rendered effects"""
    ui.write(_(b'available style:\n'))
    if not ui._styles:
        return
    # Pad so the effect columns line up under the longest label.
    width = max(map(len, ui._styles))
    for label, effects in sorted(ui._styles.items()):
        ui.write(b'%s' % label, label=label)
        if effects:
            # 50
            ui.write(b': ')
            ui.write(b' ' * (max(0, width - len(label))))
            rendered = (ui.label(e, e) for e in effects.split())
            ui.write(b', '.join(rendered))
        ui.write(b'\n')
595
595
596
596
@command(b'debugcreatestreamclonebundle', [], b'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        # stream bundles copy revlogs wholesale, so secret changesets are
        # included too; warn rather than abort (see TODO above)
        ui.warn(
            _(
                b'(warning: stream clone bundle will contain secret '
                b'revisions)\n'
            )
        )

    # generate the v1 stream bundle and write its chunks to `fname`
    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
618
618
619
619
@command(
    b'debugdag',
    [
        (b't', b'tags', None, _(b'use tags as labels')),
        (b'b', b'branches', None, _(b'annotate with branch names')),
        (b'', b'dots', None, _(b'use dots for runs')),
        (b's', b'spaces', None, _(b'separate elements by spaces')),
    ],
    _(b'[OPTION]... [FILE [REV]...]'),
    optionalrepo=True,
)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # operate on an explicit revlog index file rather than the repo
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
        revs = {int(r) for r in revs}

        def events():
            # yield 'n' (node) events carrying (rev, [non-null parents]),
            # plus an 'l' (label) event for each explicitly listed rev
            for r in rlog:
                yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
                if r in revs:
                    yield b'l', (r, b"r%i" % r)

    elif repo:
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            # map changelog rev -> list of tag names pointing at it
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)

        def events():
            b = b"default"
            for r in cl:
                if branches:
                    # emit an 'a' (annotation) event whenever the branch
                    # stored in the changelog entry changes
                    newb = cl.read(cl.node(r))[5][b'branch']
                    if newb != b:
                        yield b'a', newb
                        b = newb
                yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield b'l', (r, l)

    else:
        raise error.Abort(_(b'need repo for changelog dag'))

    # dagparser serializes the event stream into the concise text form
    for line in dagparser.dagtextlines(
        events(),
        addspaces=spaces,
        wraplabels=True,
        wrapannotations=True,
        wrapnonlinear=dots,
        usedots=dots,
        maxlinewidth=70,
    ):
        ui.write(line)
    ui.write(b"\n")
689
689
690
690
@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    # with -c/-m/--dir the storage is implied, so the first positional
    # argument is actually the revision, not a file
    implied_storage = any(
        opts.get(flag) for flag in (b'changelog', b'manifest', b'dir')
    )
    if implied_storage:
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    store = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    try:
        ui.write(store.rawdata(store.lookup(rev)))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
706
706
707
707
@command(
    b'debugdate',
    [(b'e', b'extended', None, _(b'try extended date formats'))],
    _(b'[-e] DATE [RANGE]'),
    norepo=True,
    optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # -e/--extended also tries the extended date formats table
    if opts["extended"]:
        parsed = dateutil.parsedate(date, dateutil.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    ui.writenoi18n(b"internal: %d %d\n" % parsed)
    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(parsed))
    if range:
        # a second argument is interpreted as a date range to match against
        matcher = dateutil.matchdate(range)
        ui.writenoi18n(b"match: %s\n" % matcher(parsed[0]))
726
726
727
727
@command(
    b'debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
                   (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        # classify the delta of `rev` and measure its chain; index entries
        # are tuples where, per the usage below, e[1] is the compressed
        # size, e[2] the uncompressed size, e[3] the delta base revision,
        # and e[5]/e[6] the parent revisions — NOTE(review): inferred from
        # the comparisons here, confirm against revlog index layout
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # with generaldelta the base may be any earlier rev; name the
            # common cases (parent 1/2, previous rev, self = full snapshot)
            if e[3] == e[5]:
                deltatype = b'p1'
            elif e[3] == e[6]:
                deltatype = b'p2'
            elif e[3] == rev - 1:
                deltatype = b'prev'
            elif e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'other'
        else:
            # without generaldelta a delta can only be against the
            # previous revision (or be a full snapshot)
            if e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'prev'

        chain = r._deltachain(rev)[0]
        # chain size = sum of compressed sizes of every rev in the chain
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter(b'debugdeltachain', opts)

    # plain-text column header (templated output ignores fm.plain)
    fm.plain(
        b'    rev  chain# chainlen     prev   delta       '
        b'size    rawsize  chainsize     ratio   lindist extradist '
        b'extraratio'
    )
    if withsparseread:
        fm.plain(b'   readsize largestblk rddensity srchunks')
    fm.plain(b'\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        # number chains consecutively by first-seen base revision
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        # on-disk span from the chain base to the end of this revision
        lineardist = revstart + comp - basestart
        # bytes inside that span that belong to other chains
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            # chain of length 1: this rev is its own base
            prevrev = -1

        # guard the ratios against zero-sized revisions/chains
        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write(
            b'rev chainid chainlen prevrev deltatype compsize '
            b'uncompsize chainsize chainratio lindist extradist '
            b'extraratio',
            b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
            rev,
            chainid,
            len(chain),
            prevrev,
            deltatype,
            comp,
            uncomp,
            chainsize,
            chainratio,
            lineardist,
            extradist,
            extraratio,
            rev=rev,
            chainid=chainid,
            chainlen=len(chain),
            prevrev=prevrev,
            deltatype=deltatype,
            compsize=comp,
            uncompsize=uncomp,
            chainsize=chainsize,
            chainratio=chainratio,
            lindist=lineardist,
            extradist=extradist,
            extraratio=extraratio,
        )
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            # simulate a sparse read: slice the chain into the blocks that
            # would actually be fetched from disk
            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            # fraction of the bytes read that are actually part of the chain
            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write(
                b'readsize largestblock readdensity srchunks',
                b' %10d %10d %9.5f %8d',
                readsize,
                largestblock,
                readdensity,
                srchunks,
                readsize=readsize,
                largestblock=largestblock,
                readdensity=readdensity,
                srchunks=srchunks,
            )

        fm.plain(b'\n')

    fm.end()
908
908
909
909
@command(
    b'debugdirstate|debugstate',
    [
        (
            b'',
            b'nodates',
            None,
            _(b'do not display the saved mtime (DEPRECATED)'),
        ),
        (b'', b'dates', True, _(b'display the saved mtime')),
        (b'', b'datesort', None, _(b'sort by saved mtime')),
    ],
    _(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    # --no-dates is the modern spelling; the deprecated --nodates flag
    # still forces dates off when given
    nodates = not opts['dates']
    if opts.get('nodates') is not None:
        nodates = True
    datesort = opts.get('datesort')

    if datesort:
        keyfunc = lambda x: (x[1][3], x[0])  # sort by mtime, then by filename
    else:
        keyfunc = None  # sort by filename
    # each dirstate entry `ent` is indexed as state char / mode / size /
    # mtime below — NOTE(review): inferred from usage, confirm against the
    # dirstate entry layout
    for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
        if ent[3] == -1:
            # mtime of -1 means "unset"
            timestr = b'unset '
        elif nodates:
            timestr = b'set '
        else:
            timestr = time.strftime(
                "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
            )
            timestr = encoding.strtolocal(timestr)
        if ent[1] & 0o20000:
            # symlink bit set in the recorded mode
            mode = b'lnk'
        else:
            mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    # finally list recorded copy sources
    for f in repo.dirstate.copies():
        ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
953
953
954
954
@command(
    b'debugdiscovery',
    [
        (b'', b'old', None, _(b'use old-style discovery')),
        (
            b'',
            b'nonheads',
            None,
            _(b'use old-style discovery with non-heads included'),
        ),
        (b'', b'rev', [], b'restrict discovery to this set of revs'),
        (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
    ]
    + cmdutil.remoteopts,
    _(b'[--rev REV] [OTHER]'),
)
def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(int(opts[b'seed']))

    # `data` is filled by the discovery code (via audit=) and by the
    # statistics computed below; it backs the %-formatted summary lines
    data = {}
    if opts.get(b'old'):
        # legacy tree-walking discovery

        def doit(pushedrevs, remoteheads, remote=remote):
            if not util.safehasattr(remote, b'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(
                repo, remote, force=True, audit=data
            )
            common = set(common)
            if not opts.get(b'nonheads'):
                ui.writenoi18n(
                    b"unpruned common: %s\n"
                    % b" ".join(sorted(short(n) for n in common))
                )

            # reduce the common set to its heads
            clnode = repo.changelog.node
            common = repo.revs(b'heads(::%ln)', common)
            common = {clnode(r) for r in common}
            return common, hds

    else:
        # modern set-based discovery

        def doit(pushedrevs, remoteheads, remote=remote):
            nodes = None
            if pushedrevs:
                # --rev restricts discovery to the ancestors of these revs
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(
                ui, repo, remote, ancestorsof=nodes, audit=data
            )
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts[b'rev']
    # time only the discovery run itself
    with util.timedcm('debug-discovery') as t:
        common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    heads_common = set(common)
    heads_remote = set(hds)
    heads_local = set(repo.heads())
    # note: they cannot be a local or remote head that is in common and not
    # itself a head of common.
    heads_common_local = heads_common & heads_local
    heads_common_remote = heads_common & heads_remote
    heads_common_both = heads_common & heads_remote & heads_local

    all = repo.revs(b'all()')
    common = repo.revs(b'::%ln', common)
    roots_common = repo.revs(b'roots(::%ld)', common)
    missing = repo.revs(b'not ::%ld', common)
    heads_missing = repo.revs(b'heads(%ld)', missing)
    roots_missing = repo.revs(b'roots(%ld)', missing)
    # common and missing must partition the repository
    assert len(common) + len(missing) == len(all)

    # the set discovery starts from: revisions neither known-common from
    # the remote side nor descendants of locally-common heads
    initial_undecided = repo.revs(
        b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
    )
    heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
    roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
    common_initial_undecided = initial_undecided & common
    missing_initial_undecided = initial_undecided & missing

    data[b'elapsed'] = t.elapsed
    data[b'nb-common-heads'] = len(heads_common)
    data[b'nb-common-heads-local'] = len(heads_common_local)
    data[b'nb-common-heads-remote'] = len(heads_common_remote)
    data[b'nb-common-heads-both'] = len(heads_common_both)
    data[b'nb-common-roots'] = len(roots_common)
    data[b'nb-head-local'] = len(heads_local)
    data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
    data[b'nb-head-remote'] = len(heads_remote)
    data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
        heads_common_remote
    )
    data[b'nb-revs'] = len(all)
    data[b'nb-revs-common'] = len(common)
    data[b'nb-revs-missing'] = len(missing)
    data[b'nb-missing-heads'] = len(heads_missing)
    data[b'nb-missing-roots'] = len(roots_missing)
    data[b'nb-ini_und'] = len(initial_undecided)
    data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
    data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
    data[b'nb-ini_und-common'] = len(common_initial_undecided)
    data[b'nb-ini_und-missing'] = len(missing_initial_undecided)

    # display discovery summary
    ui.writenoi18n(b"elapsed time:  %(elapsed)f seconds\n" % data)
    ui.writenoi18n(b"round-trips:           %(total-roundtrips)9d\n" % data)
    ui.writenoi18n(b"heads summary:\n")
    ui.writenoi18n(b"  total common heads:  %(nb-common-heads)9d\n" % data)
    ui.writenoi18n(
        b"    also local heads:  %(nb-common-heads-local)9d\n" % data
    )
    ui.writenoi18n(
        b"    also remote heads: %(nb-common-heads-remote)9d\n" % data
    )
    ui.writenoi18n(b"    both:              %(nb-common-heads-both)9d\n" % data)
    ui.writenoi18n(b"  local heads:         %(nb-head-local)9d\n" % data)
    ui.writenoi18n(
        b"    common:            %(nb-common-heads-local)9d\n" % data
    )
    ui.writenoi18n(
        b"    missing:           %(nb-head-local-missing)9d\n" % data
    )
    ui.writenoi18n(b"  remote heads:        %(nb-head-remote)9d\n" % data)
    ui.writenoi18n(
        b"    common:            %(nb-common-heads-remote)9d\n" % data
    )
    ui.writenoi18n(
        b"    unknown:           %(nb-head-remote-unknown)9d\n" % data
    )
    ui.writenoi18n(b"local changesets:      %(nb-revs)9d\n" % data)
    ui.writenoi18n(b"  common:              %(nb-revs-common)9d\n" % data)
    ui.writenoi18n(b"    heads:             %(nb-common-heads)9d\n" % data)
    ui.writenoi18n(b"    roots:             %(nb-common-roots)9d\n" % data)
    ui.writenoi18n(b"  missing:             %(nb-revs-missing)9d\n" % data)
    ui.writenoi18n(b"    heads:             %(nb-missing-heads)9d\n" % data)
    ui.writenoi18n(b"    roots:             %(nb-missing-roots)9d\n" % data)
    ui.writenoi18n(b"  first undecided set: %(nb-ini_und)9d\n" % data)
    ui.writenoi18n(b"    heads:             %(nb-ini_und-heads)9d\n" % data)
    ui.writenoi18n(b"    roots:             %(nb-ini_und-roots)9d\n" % data)
    ui.writenoi18n(b"    common:            %(nb-ini_und-common)9d\n" % data)
    ui.writenoi18n(b"    missing:           %(nb-ini_und-missing)9d\n" % data)

    if ui.verbose:
        ui.writenoi18n(
            b"common heads: %s\n"
            % b" ".join(sorted(short(n) for n in heads_common))
        )
1113
1113
1114
1114
# I/O chunk size (4 KiB) used when streaming downloaded data below.
_chunksize = 4 << 10
1116
1116
1117
1117
@command(
    b'debugdownload',
    [
        (b'o', b'output', b'', _(b'path')),
    ],
    optionalrepo=True,
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config

    The resource at ``url`` is fetched through Mercurial's URL handling
    (so proxy/auth configuration applies) and streamed in _chunksize
    pieces either to the ui (default) or to the file named by --output.
    """
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        # stream into the requested file instead of the ui
        dest = open(output, b"wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        # Fix: the original leaked the URL handle; release it along with
        # any output file we opened.
        fh.close()
        if output:
            dest.close()
1140
1140
1141
1141
@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)

        # Where does this extension live on disk?  Oxidized builds have no
        # __file__, so fall back to the executable path there.
        if util.safehasattr(extmod, '__file__'):
            extsource = pycompat.fsencode(extmod.__file__)
        elif getattr(sys, 'oxidized', False):
            extsource = pycompat.sysexecutable
        else:
            extsource = None

        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', b'').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', extname)
        else:
            fm.write(b'name', b'%s', extname)
            # annotate the name with a compatibility note when the
            # extension was not tested with this Mercurial version
            if isinternal or hgver in exttestedwith:
                fm.plain(b'\n')
            elif not exttestedwith:
                fm.plain(_(b' (untested!)\n'))
            else:
                fm.plain(b' (%s!)\n' % exttestedwith[-1])

        fm.condwrite(
            ui.verbose and extsource,
            b'source',
            _(b' location: %s\n'),
            extsource or b"",
        )

        if ui.verbose:
            fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
            fm.data(bundled=isinternal)

        fm.condwrite(
            ui.verbose and exttestedwith,
            b'testedwith',
            _(b' tested with: %s\n'),
            fm.formatlist(exttestedwith, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and extbuglink,
            b'buglink',
            _(b' bug reporting: %s\n'),
            extbuglink or b"",
        )

    fm.end()
1203
1203
1204
1204
@command(
    b'debugfileset',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'apply the filespec on this revision'),
            _(b'REV'),
        ),
        (
            b'',
            b'all-files',
            False,
            _(b'test files from all revisions and working directory'),
        ),
        (
            b's',
            b'show-matcher',
            None,
            _(b'print internal representation of matcher'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
    ],
    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset

    fileset.symbols  # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)

    # successive transformation stages the parsed expression goes through;
    # --show-stage can dump the tree after any of them
    stages = [
        (b'parsed', pycompat.identity),
        (b'analyzed', filesetlang.analyze),
        (b'optimized', filesetlang.optimize),
    ]
    stagenames = {n for n, f in stages}

    showalways = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        # validate the requested stage names before running anything
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            # --verbose-only "parsed" dumps omit the stage header
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(filesetlang.prettyformat(tree), b"\n")

    # gather the candidate file names the matcher will be tested against
    files = set()
    if opts[b'all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts[b'all_files'] or ctx.rev() is None:
        # include working-directory files (also unknown and ignored ones)
        wctx = repo[None]
        files.update(
            repo.dirstate.walk(
                scmutil.matchall(repo),
                subrepos=list(wctx.substate),
                unknown=True,
                ignored=True,
            )
        )
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(repo.getcwd(), expr)
    # --show-matcher, or --verbose when the flag was left unset
    if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write(b"%s\n" % f)
1300
1300
1301
1301
@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    # width of the name column: the longest variant name, but never
    # narrower than the column header itself
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len(b'format-variant'), maxvariantlength)

    def makeformatname(name):
        # pad each name so the value columns line up
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))

    fm = ui.formatter(b'debugformat', opts)
    if fm.isplain():

        def formatvalue(value):
            # plain output renders booleans as yes/no; byte strings
            # (detected via startswith) pass through unchanged
            if util.safehasattr(value, b'startswith'):
                return value
            if value:
                return b'yes'
            else:
                return b'no'

    else:
        formatvalue = pycompat.identity

    # header row
    fm.plain(b'format-variant')
    fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
    fm.plain(b' repo')
    if ui.verbose:
        fm.plain(b' config default')
    fm.plain(b'\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # pick labels so mismatches between repo, config and default
        # values can be highlighted by the color extension
        if repovalue != configvalue:
            namelabel = b'formatvariant.name.mismatchconfig'
            repolabel = b'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = b'formatvariant.name.mismatchdefault'
            repolabel = b'formatvariant.repo.mismatchdefault'
        else:
            namelabel = b'formatvariant.name.uptodate'
            repolabel = b'formatvariant.repo.uptodate'

        fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
        fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
        if fv.default != configvalue:
            configlabel = b'formatvariant.config.special'
        else:
            configlabel = b'formatvariant.config.default'
        # config and default columns only appear with --verbose
        fm.condwrite(
            ui.verbose,
            b'config',
            b' %6s',
            formatvalue(configvalue),
            label=configlabel,
        )
        fm.condwrite(
            ui.verbose,
            b'default',
            b' %7s',
            formatvalue(fv.default),
            label=b'formatvariant.default',
        )
        fm.plain(b'\n')
    fm.end()
1372
1372
1373
1373
@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""
    write = ui.writenoi18n

    def yesno(flag):
        # render a boolean probe result the way this command reports it
        return flag and b'yes' or b'no'

    write(b'path: %s\n' % path)
    write(b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)'))
    write(b'exec: %s\n' % yesno(util.checkexec(path)))
    write(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
    write(b'symlink: %s\n' % yesno(util.checklink(path)))
    write(b'hardlink: %s\n' % yesno(util.checknlink(path)))

    # probe case sensitivity with a throwaway file; report (unknown) when
    # the temp file cannot be created
    casesensitive = b'(unknown)'
    try:
        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
            casesensitive = yesno(util.fscasesensitive(f.name))
    except OSError:
        pass
    write(b'case-sensitive: %s\n' % casesensitive)
1396
1396
1397
1397
@command(
    b'debuggetbundle',
    [
        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
        (
            b't',
            b'type',
            b'bzip2',
            _(b'bundle compression type to use'),
            _(b'TYPE'),
        ),
    ],
    _(b'REPO FILE [-H|-C ID]...'),
    norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'getbundle'):
        raise error.Abort(b"getbundle() not supported by target repository")

    # build the getbundle() arguments from the command line node lists
    args = {}
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args['bundlecaps'] = None
    bundle = repo.getbundle(b'debug', **args)

    # map the user-facing compression name onto an on-disk bundle type
    btypes = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }
    requested = opts.get(b'type', b'bzip2').lower()
    bundletype = btypes.get(requested)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_(b'unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1444
1444
1445
1445
@command(b'debugignore', [], b'[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write(b"%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            # path (the file itself or a parent dir) that matched a rule
            ignored = None
            # (ignorefile, lineno, line) describing the matching rule
            ignoredata = None
            if nf != b'.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    # the file itself is not ignored; check whether one of
                    # its containing directories is
                    for p in pathutil.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_(b"%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(
                        _(
                            b"%s is ignored because of "
                            b"containing directory %s\n"
                        )
                        % (uipathfn(f), ignored)
                    )
                ignorefile, lineno, line = ignoredata
                ui.write(
                    _(b"(ignore rule in %s, line %d: '%s')\n")
                    % (ignorefile, lineno, line)
                )
            else:
                ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1494
1494
1495
1495
@command(
    b'debugindex',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindex', file_, opts)

    # full hashes in debug mode, abbreviated ones otherwise
    shortfn = hex if ui.debugflag else short

    # size the node id columns by rendering one id (default 12 when empty)
    idlen = 12
    for sample in store:
        idlen = len(shortfn(store.node(sample)))
        break

    fm = ui.formatter(b'debugindex', opts)
    fm.plain(
        b' rev linkrev %s %s p2\n'
        % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
    )

    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
        fm.write(b'node', b'%s ', shortfn(node))
        fm.write(b'p1', b'%s ', shortfn(parents[0]))
        fm.write(b'p2', b'%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()
1535
1535
1536
1536
@command(
    b'debugindexdot',
    cmdutil.debugrevlogopts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
    ui.writenoi18n(b"digraph G {\n")
    # one parent->child edge per (non-null) parent of every revision
    for rev in r:
        p1, p2 = r.parents(r.node(rev))
        ui.write(b"\t%d -> %d\n" % (r.rev(p1), rev))
        if p2 != nullid:
            ui.write(b"\t%d -> %d\n" % (r.rev(p2), rev))
    ui.write(b"}\n")
1555
1555
1556
1556
@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # exercise the index before inspecting it — presumably this forces the
    # native implementation to load; TODO confirm why shortest() is used
    repo.changelog.shortest(nullid, 1)
    index = repo.changelog.index
    if not util.safehasattr(index, b'stats'):
        raise error.Abort(_(b'debugindexstats only works with native code'))
    for key, value in sorted(index.stats().items()):
        ui.write(b'%s: %d\n' % (key, value))
1566
1566
1567
1567
1568 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1568 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1569 def debuginstall(ui, **opts):
1569 def debuginstall(ui, **opts):
1570 """test Mercurial installation
1570 """test Mercurial installation
1571
1571
1572 Returns 0 on success.
1572 Returns 0 on success.
1573 """
1573 """
1574 opts = pycompat.byteskwargs(opts)
1574 opts = pycompat.byteskwargs(opts)
1575
1575
1576 problems = 0
1576 problems = 0
1577
1577
1578 fm = ui.formatter(b'debuginstall', opts)
1578 fm = ui.formatter(b'debuginstall', opts)
1579 fm.startitem()
1579 fm.startitem()
1580
1580
1581 # encoding might be unknown or wrong. don't translate these messages.
1581 # encoding might be unknown or wrong. don't translate these messages.
1582 fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
1582 fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
1583 err = None
1583 err = None
1584 try:
1584 try:
1585 codecs.lookup(pycompat.sysstr(encoding.encoding))
1585 codecs.lookup(pycompat.sysstr(encoding.encoding))
1586 except LookupError as inst:
1586 except LookupError as inst:
1587 err = stringutil.forcebytestr(inst)
1587 err = stringutil.forcebytestr(inst)
1588 problems += 1
1588 problems += 1
1589 fm.condwrite(
1589 fm.condwrite(
1590 err,
1590 err,
1591 b'encodingerror',
1591 b'encodingerror',
1592 b" %s\n (check that your locale is properly set)\n",
1592 b" %s\n (check that your locale is properly set)\n",
1593 err,
1593 err,
1594 )
1594 )
1595
1595
1596 # Python
1596 # Python
1597 pythonlib = None
1597 pythonlib = None
1598 if util.safehasattr(os, '__file__'):
1598 if util.safehasattr(os, '__file__'):
1599 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1599 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1600 elif getattr(sys, 'oxidized', False):
1600 elif getattr(sys, 'oxidized', False):
1601 pythonlib = pycompat.sysexecutable
1601 pythonlib = pycompat.sysexecutable
1602
1602
1603 fm.write(
1603 fm.write(
1604 b'pythonexe',
1604 b'pythonexe',
1605 _(b"checking Python executable (%s)\n"),
1605 _(b"checking Python executable (%s)\n"),
1606 pycompat.sysexecutable or _(b"unknown"),
1606 pycompat.sysexecutable or _(b"unknown"),
1607 )
1607 )
1608 fm.write(
1608 fm.write(
1609 b'pythonimplementation',
1609 b'pythonimplementation',
1610 _(b"checking Python implementation (%s)\n"),
1610 _(b"checking Python implementation (%s)\n"),
1611 pycompat.sysbytes(platform.python_implementation()),
1611 pycompat.sysbytes(platform.python_implementation()),
1612 )
1612 )
1613 fm.write(
1613 fm.write(
1614 b'pythonver',
1614 b'pythonver',
1615 _(b"checking Python version (%s)\n"),
1615 _(b"checking Python version (%s)\n"),
1616 (b"%d.%d.%d" % sys.version_info[:3]),
1616 (b"%d.%d.%d" % sys.version_info[:3]),
1617 )
1617 )
1618 fm.write(
1618 fm.write(
1619 b'pythonlib',
1619 b'pythonlib',
1620 _(b"checking Python lib (%s)...\n"),
1620 _(b"checking Python lib (%s)...\n"),
1621 pythonlib or _(b"unknown"),
1621 pythonlib or _(b"unknown"),
1622 )
1622 )
1623
1623
1624 try:
1624 try:
1625 from . import rustext
1625 from . import rustext
1626
1626
1627 rustext.__doc__ # trigger lazy import
1627 rustext.__doc__ # trigger lazy import
1628 except ImportError:
1628 except ImportError:
1629 rustext = None
1629 rustext = None
1630
1630
1631 security = set(sslutil.supportedprotocols)
1631 security = set(sslutil.supportedprotocols)
1632 if sslutil.hassni:
1632 if sslutil.hassni:
1633 security.add(b'sni')
1633 security.add(b'sni')
1634
1634
1635 fm.write(
1635 fm.write(
1636 b'pythonsecurity',
1636 b'pythonsecurity',
1637 _(b"checking Python security support (%s)\n"),
1637 _(b"checking Python security support (%s)\n"),
1638 fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
1638 fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
1639 )
1639 )
1640
1640
1641 # These are warnings, not errors. So don't increment problem count. This
1641 # These are warnings, not errors. So don't increment problem count. This
1642 # may change in the future.
1642 # may change in the future.
1643 if b'tls1.2' not in security:
1643 if b'tls1.2' not in security:
1644 fm.plain(
1644 fm.plain(
1645 _(
1645 _(
1646 b' TLS 1.2 not supported by Python install; '
1646 b' TLS 1.2 not supported by Python install; '
1647 b'network connections lack modern security\n'
1647 b'network connections lack modern security\n'
1648 )
1648 )
1649 )
1649 )
1650 if b'sni' not in security:
1650 if b'sni' not in security:
1651 fm.plain(
1651 fm.plain(
1652 _(
1652 _(
1653 b' SNI not supported by Python install; may have '
1653 b' SNI not supported by Python install; may have '
1654 b'connectivity issues with some servers\n'
1654 b'connectivity issues with some servers\n'
1655 )
1655 )
1656 )
1656 )
1657
1657
1658 fm.plain(
1658 fm.plain(
1659 _(
1659 _(
1660 b"checking Rust extensions (%s)\n"
1660 b"checking Rust extensions (%s)\n"
1661 % (b'missing' if rustext is None else b'installed')
1661 % (b'missing' if rustext is None else b'installed')
1662 ),
1662 ),
1663 )
1663 )
1664
1664
1665 # TODO print CA cert info
1665 # TODO print CA cert info
1666
1666
1667 # hg version
1667 # hg version
1668 hgver = util.version()
1668 hgver = util.version()
1669 fm.write(
1669 fm.write(
1670 b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
1670 b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
1671 )
1671 )
1672 fm.write(
1672 fm.write(
1673 b'hgverextra',
1673 b'hgverextra',
1674 _(b"checking Mercurial custom build (%s)\n"),
1674 _(b"checking Mercurial custom build (%s)\n"),
1675 b'+'.join(hgver.split(b'+')[1:]),
1675 b'+'.join(hgver.split(b'+')[1:]),
1676 )
1676 )
1677
1677
1678 # compiled modules
1678 # compiled modules
1679 hgmodules = None
1679 hgmodules = None
1680 if util.safehasattr(sys.modules[__name__], '__file__'):
1680 if util.safehasattr(sys.modules[__name__], '__file__'):
1681 hgmodules = os.path.dirname(pycompat.fsencode(__file__))
1681 hgmodules = os.path.dirname(pycompat.fsencode(__file__))
1682 elif getattr(sys, 'oxidized', False):
1682 elif getattr(sys, 'oxidized', False):
1683 hgmodules = pycompat.sysexecutable
1683 hgmodules = pycompat.sysexecutable
1684
1684
1685 fm.write(
1685 fm.write(
1686 b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
1686 b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
1687 )
1687 )
1688 fm.write(
1688 fm.write(
1689 b'hgmodules',
1689 b'hgmodules',
1690 _(b"checking installed modules (%s)...\n"),
1690 _(b"checking installed modules (%s)...\n"),
1691 hgmodules or _(b"unknown"),
1691 hgmodules or _(b"unknown"),
1692 )
1692 )
1693
1693
1694 rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
1694 rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
1695 rustext = rustandc # for now, that's the only case
1695 rustext = rustandc # for now, that's the only case
1696 cext = policy.policy in (b'c', b'allow') or rustandc
1696 cext = policy.policy in (b'c', b'allow') or rustandc
1697 nopure = cext or rustext
1697 nopure = cext or rustext
1698 if nopure:
1698 if nopure:
1699 err = None
1699 err = None
1700 try:
1700 try:
1701 if cext:
1701 if cext:
1702 from .cext import ( # pytype: disable=import-error
1702 from .cext import ( # pytype: disable=import-error
1703 base85,
1703 base85,
1704 bdiff,
1704 bdiff,
1705 mpatch,
1705 mpatch,
1706 osutil,
1706 osutil,
1707 )
1707 )
1708
1708
1709 # quiet pyflakes
1709 # quiet pyflakes
1710 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1710 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1711 if rustext:
1711 if rustext:
1712 from .rustext import ( # pytype: disable=import-error
1712 from .rustext import ( # pytype: disable=import-error
1713 ancestor,
1713 ancestor,
1714 dirstate,
1714 dirstate,
1715 )
1715 )
1716
1716
1717 dir(ancestor), dir(dirstate) # quiet pyflakes
1717 dir(ancestor), dir(dirstate) # quiet pyflakes
1718 except Exception as inst:
1718 except Exception as inst:
1719 err = stringutil.forcebytestr(inst)
1719 err = stringutil.forcebytestr(inst)
1720 problems += 1
1720 problems += 1
1721 fm.condwrite(err, b'extensionserror', b" %s\n", err)
1721 fm.condwrite(err, b'extensionserror', b" %s\n", err)
1722
1722
1723 compengines = util.compengines._engines.values()
1723 compengines = util.compengines._engines.values()
1724 fm.write(
1724 fm.write(
1725 b'compengines',
1725 b'compengines',
1726 _(b'checking registered compression engines (%s)\n'),
1726 _(b'checking registered compression engines (%s)\n'),
1727 fm.formatlist(
1727 fm.formatlist(
1728 sorted(e.name() for e in compengines),
1728 sorted(e.name() for e in compengines),
1729 name=b'compengine',
1729 name=b'compengine',
1730 fmt=b'%s',
1730 fmt=b'%s',
1731 sep=b', ',
1731 sep=b', ',
1732 ),
1732 ),
1733 )
1733 )
1734 fm.write(
1734 fm.write(
1735 b'compenginesavail',
1735 b'compenginesavail',
1736 _(b'checking available compression engines (%s)\n'),
1736 _(b'checking available compression engines (%s)\n'),
1737 fm.formatlist(
1737 fm.formatlist(
1738 sorted(e.name() for e in compengines if e.available()),
1738 sorted(e.name() for e in compengines if e.available()),
1739 name=b'compengine',
1739 name=b'compengine',
1740 fmt=b'%s',
1740 fmt=b'%s',
1741 sep=b', ',
1741 sep=b', ',
1742 ),
1742 ),
1743 )
1743 )
1744 wirecompengines = compression.compengines.supportedwireengines(
1744 wirecompengines = compression.compengines.supportedwireengines(
1745 compression.SERVERROLE
1745 compression.SERVERROLE
1746 )
1746 )
1747 fm.write(
1747 fm.write(
1748 b'compenginesserver',
1748 b'compenginesserver',
1749 _(
1749 _(
1750 b'checking available compression engines '
1750 b'checking available compression engines '
1751 b'for wire protocol (%s)\n'
1751 b'for wire protocol (%s)\n'
1752 ),
1752 ),
1753 fm.formatlist(
1753 fm.formatlist(
1754 [e.name() for e in wirecompengines if e.wireprotosupport()],
1754 [e.name() for e in wirecompengines if e.wireprotosupport()],
1755 name=b'compengine',
1755 name=b'compengine',
1756 fmt=b'%s',
1756 fmt=b'%s',
1757 sep=b', ',
1757 sep=b', ',
1758 ),
1758 ),
1759 )
1759 )
1760 re2 = b'missing'
1760 re2 = b'missing'
1761 if util._re2:
1761 if util._re2:
1762 re2 = b'available'
1762 re2 = b'available'
1763 fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
1763 fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
1764 fm.data(re2=bool(util._re2))
1764 fm.data(re2=bool(util._re2))
1765
1765
1766 # templates
1766 # templates
1767 p = templater.templatedir()
1767 p = templater.templatedir()
1768 fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
1768 fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
1769 fm.condwrite(not p, b'', _(b" no template directories found\n"))
1769 fm.condwrite(not p, b'', _(b" no template directories found\n"))
1770 if p:
1770 if p:
1771 (m, fp) = templater.try_open_template(b"map-cmdline.default")
1771 (m, fp) = templater.try_open_template(b"map-cmdline.default")
1772 if m:
1772 if m:
1773 # template found, check if it is working
1773 # template found, check if it is working
1774 err = None
1774 err = None
1775 try:
1775 try:
1776 templater.templater.frommapfile(m)
1776 templater.templater.frommapfile(m)
1777 except Exception as inst:
1777 except Exception as inst:
1778 err = stringutil.forcebytestr(inst)
1778 err = stringutil.forcebytestr(inst)
1779 p = None
1779 p = None
1780 fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
1780 fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
1781 else:
1781 else:
1782 p = None
1782 p = None
1783 fm.condwrite(
1783 fm.condwrite(
1784 p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
1784 p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
1785 )
1785 )
1786 fm.condwrite(
1786 fm.condwrite(
1787 not m,
1787 not m,
1788 b'defaulttemplatenotfound',
1788 b'defaulttemplatenotfound',
1789 _(b" template '%s' not found\n"),
1789 _(b" template '%s' not found\n"),
1790 b"default",
1790 b"default",
1791 )
1791 )
1792 if not p:
1792 if not p:
1793 problems += 1
1793 problems += 1
1794 fm.condwrite(
1794 fm.condwrite(
1795 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
1795 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
1796 )
1796 )
1797
1797
1798 # editor
1798 # editor
1799 editor = ui.geteditor()
1799 editor = ui.geteditor()
1800 editor = util.expandpath(editor)
1800 editor = util.expandpath(editor)
1801 editorbin = procutil.shellsplit(editor)[0]
1801 editorbin = procutil.shellsplit(editor)[0]
1802 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
1802 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
1803 cmdpath = procutil.findexe(editorbin)
1803 cmdpath = procutil.findexe(editorbin)
1804 fm.condwrite(
1804 fm.condwrite(
1805 not cmdpath and editor == b'vi',
1805 not cmdpath and editor == b'vi',
1806 b'vinotfound',
1806 b'vinotfound',
1807 _(
1807 _(
1808 b" No commit editor set and can't find %s in PATH\n"
1808 b" No commit editor set and can't find %s in PATH\n"
1809 b" (specify a commit editor in your configuration"
1809 b" (specify a commit editor in your configuration"
1810 b" file)\n"
1810 b" file)\n"
1811 ),
1811 ),
1812 not cmdpath and editor == b'vi' and editorbin,
1812 not cmdpath and editor == b'vi' and editorbin,
1813 )
1813 )
1814 fm.condwrite(
1814 fm.condwrite(
1815 not cmdpath and editor != b'vi',
1815 not cmdpath and editor != b'vi',
1816 b'editornotfound',
1816 b'editornotfound',
1817 _(
1817 _(
1818 b" Can't find editor '%s' in PATH\n"
1818 b" Can't find editor '%s' in PATH\n"
1819 b" (specify a commit editor in your configuration"
1819 b" (specify a commit editor in your configuration"
1820 b" file)\n"
1820 b" file)\n"
1821 ),
1821 ),
1822 not cmdpath and editorbin,
1822 not cmdpath and editorbin,
1823 )
1823 )
1824 if not cmdpath and editor != b'vi':
1824 if not cmdpath and editor != b'vi':
1825 problems += 1
1825 problems += 1
1826
1826
1827 # check username
1827 # check username
1828 username = None
1828 username = None
1829 err = None
1829 err = None
1830 try:
1830 try:
1831 username = ui.username()
1831 username = ui.username()
1832 except error.Abort as e:
1832 except error.Abort as e:
1833 err = e.message
1833 err = e.message
1834 problems += 1
1834 problems += 1
1835
1835
1836 fm.condwrite(
1836 fm.condwrite(
1837 username, b'username', _(b"checking username (%s)\n"), username
1837 username, b'username', _(b"checking username (%s)\n"), username
1838 )
1838 )
1839 fm.condwrite(
1839 fm.condwrite(
1840 err,
1840 err,
1841 b'usernameerror',
1841 b'usernameerror',
1842 _(
1842 _(
1843 b"checking username...\n %s\n"
1843 b"checking username...\n %s\n"
1844 b" (specify a username in your configuration file)\n"
1844 b" (specify a username in your configuration file)\n"
1845 ),
1845 ),
1846 err,
1846 err,
1847 )
1847 )
1848
1848
1849 for name, mod in extensions.extensions():
1849 for name, mod in extensions.extensions():
1850 handler = getattr(mod, 'debuginstall', None)
1850 handler = getattr(mod, 'debuginstall', None)
1851 if handler is not None:
1851 if handler is not None:
1852 problems += handler(ui, fm)
1852 problems += handler(ui, fm)
1853
1853
1854 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
1854 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
1855 if not problems:
1855 if not problems:
1856 fm.data(problems=problems)
1856 fm.data(problems=problems)
1857 fm.condwrite(
1857 fm.condwrite(
1858 problems,
1858 problems,
1859 b'problems',
1859 b'problems',
1860 _(b"%d problems detected, please check your install!\n"),
1860 _(b"%d problems detected, please check your install!\n"),
1861 problems,
1861 problems,
1862 )
1862 )
1863 fm.end()
1863 fm.end()
1864
1864
1865 return problems
1865 return problems
1866
1866
1867
1867
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    # Open the target as a peer; this works for both local and remote repos.
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    # One batched query with all node ids, then one output character per id:
    # b"1" for known, b"0" for unknown.
    known = peer.known([bin(hexnode) for hexnode in ids])
    rendered = b"".join(b"1" if flag else b"0" for flag in known)
    ui.write(b"%s\n" % rendered)
1881
1881
1882
1882
@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # Kept only as an alias for old completion scripts; the real
    # implementation lives in debugnamecomplete below.
    debugnamecomplete(ui, repo, *args)
1887
1887
1888
1888
@command(
    b'debuglocks',
    [
        (b'L', b'force-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # Forced removal mode: unlink the lock file(s) directly and exit;
    # --force-* is never combined with the --set-* behavior below.
    if opts.get('force_lock'):
        repo.svfs.unlink(b'lock')
    if opts.get('force_wlock'):
        repo.vfs.unlink(b'wlock')
    if opts.get('force_lock') or opts.get('force_wlock'):
        return 0

    locks = []
    try:
        # Note: wlock is acquired before the store lock, matching the
        # acquisition order used elsewhere in Mercurial.  Acquisition is
        # non-blocking (wait=False) so an already-held lock aborts at once.
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            # Hold the acquired lock(s) until the user confirms (or the
            # process is interrupted); the finally clause releases them.
            ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    # Reporting mode (no --force-*/--set-* flags given).
    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # We managed to take the lock, so it was free; release it
            # immediately and report it as such below.
            l.release()
        else:
            try:
                # Lock is held by someone else: describe the holder from the
                # lock file's metadata and contents ("host:pid").
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if b":" in locker:
                    host, pid = locker.split(b':')
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except OSError as e:
                # ENOENT means the lock vanished between the probe and the
                # stat — treat it as free.  Anything else is a real error.
                if e.errno != errno.ENOENT:
                    raise

        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    return held
2000
2000
2001
2001
@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        # Fetch the fulltext cache of the root manifest storage; not every
        # storage implementation exposes one, hence the AttributeError guard.
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _(
                b"Current revlog implementation doesn't appear to have a "
                b"manifest fulltext cache\n"
            )
            raise error.Abort(msg)

    if opts.get('clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
            return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(e, hint=b"Check your manifest node id")
                manifest.read()  # stores revision in cache too
            return

    # No --clear and no --add: display the cache contents.
    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )
2073
2073
2074
2074
@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
def debugmergestate(ui, repo, *args, **opts):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    if ui.verbose:
        # In verbose mode, first report which on-disk record format (v1/v2)
        # is in effect, mirroring the selection logic of mergestate.read().
        ms = mergestatemod.mergestate(repo)

        # sort so that reasonable information is on top
        v1records = ms._readrecordsv1()
        v2records = ms._readrecordsv2()

        if not v1records and not v2records:
            pass
        elif not v2records:
            ui.writenoi18n(b'no version 2 merge state\n')
        elif ms._v1v2match(v1records, v2records):
            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
        else:
            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')

    opts = pycompat.byteskwargs(opts)
    if not opts[b'template']:
        # Default human-readable template covering the merged commits, the
        # per-file records (regular merges and path conflicts) and extras.
        opts[b'template'] = (
            b'{if(commits, "", "no merge state found\n")}'
            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
            b'{files % "file: {path} (state \\"{state}\\")\n'
            b'{if(local_path, "'
            b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
            b' ancestor path: {ancestor_path} (node {ancestor_node})\n'
            b' other path: {other_path} (node {other_node})\n'
            b'")}'
            b'{if(rename_side, "'
            b' rename side: {rename_side}\n'
            b' renamed path: {renamed_path}\n'
            b'")}'
            b'{extras % " extra: {key} = {value}\n"}'
            b'"}'
            b'{extras % "extra: {file} ({key} = {value})\n"}'
        )

    ms = mergestatemod.mergestate.read(repo)

    fm = ui.formatter(b'debugmergestate', opts)
    fm.startitem()

    # The two sides of the merge (local/other) with their labels, if any.
    fm_commits = fm.nested(b'commits')
    if ms.active():
        for name, node, label_index in (
            (b'local', ms.local, 0),
            (b'other', ms.other, 1),
        ):
            fm_commits.startitem()
            fm_commits.data(name=name)
            fm_commits.data(node=hex(node))
            if ms._labels and len(ms._labels) > label_index:
                fm_commits.data(label=ms._labels[label_index])
    fm_commits.end()

    # One entry per file tracked by the merge state.  The tuple layout of
    # ms._state[f] depends on the record type, as dispatched below.
    fm_files = fm.nested(b'files')
    if ms.active():
        for f in ms:
            fm_files.startitem()
            fm_files.data(path=f)
            state = ms._state[f]
            fm_files.data(state=state[0])
            if state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED,
                mergestatemod.MERGE_RECORD_RESOLVED,
            ):
                # Regular merge record: local key/path, ancestor and other
                # sides, plus the local file flags.
                fm_files.data(local_key=state[1])
                fm_files.data(local_path=state[2])
                fm_files.data(ancestor_path=state[3])
                fm_files.data(ancestor_node=state[4])
                fm_files.data(other_path=state[5])
                fm_files.data(other_node=state[6])
                fm_files.data(local_flags=state[7])
            elif state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
                mergestatemod.MERGE_RECORD_RESOLVED_PATH,
            ):
                # Path-conflict record: renamed path and which side renamed.
                fm_files.data(renamed_path=state[1])
                fm_files.data(rename_side=state[2])
            fm_extras = fm_files.nested(b'extras')
            for k, v in sorted(ms.extras(f).items()):
                fm_extras.startitem()
                fm_extras.data(key=k)
                fm_extras.data(value=v)
            fm_extras.end()

    fm_files.end()

    # Extras recorded for files that have no merge-state entry of their own.
    fm_extras = fm.nested(b'extras')
    for f, d in sorted(pycompat.iteritems(ms.allextras())):
        if f in ms:
            # If file is in mergestate, we have already processed its extras
            continue
        for k, v in pycompat.iteritems(d):
            fm_extras.startitem()
            fm_extras.data(file=f)
            fm_extras.data(key=k)
            fm_extras.data(value=v)
    fm_extras.end()

    fm.end()
2182
2182
2183
2183
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    candidates = set()
    # The 'branches' namespace is skipped here and handled explicitly below,
    # so that only open branches are offered (historical behavior).
    for nsname, ns in pycompat.iteritems(repo.names):
        if nsname == b'branches':
            continue
        candidates.update(ns.listnames(repo))
    for tag, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            candidates.add(tag)

    # With no arguments, complete against the empty prefix (i.e. everything).
    prefixes = args or [b'']
    matches = {
        name
        for prefix in prefixes
        for name in candidates
        if name.startswith(prefix)
    }
    ui.write(b'\n'.join(sorted(matches)))
    ui.write(b'\n')
2206
2206
2207
2207
2208 @command(
2208 @command(
2209 b'debugnodemap',
2209 b'debugnodemap',
2210 [
2210 [
2211 (
2211 (
2212 b'',
2212 b'',
2213 b'dump-new',
2213 b'dump-new',
2214 False,
2214 False,
2215 _(b'write a (new) persistent binary nodemap on stdin'),
2215 _(b'write a (new) persistent binary nodemap on stdin'),
2216 ),
2216 ),
2217 (b'', b'dump-disk', False, _(b'dump on-disk data on stdin')),
2217 (b'', b'dump-disk', False, _(b'dump on-disk data on stdin')),
2218 (
2218 (
2219 b'',
2219 b'',
2220 b'check',
2220 b'check',
2221 False,
2221 False,
2222 _(b'check that the data on disk data are correct.'),
2222 _(b'check that the data on disk data are correct.'),
2223 ),
2223 ),
2224 (
2224 (
2225 b'',
2225 b'',
2226 b'metadata',
2226 b'metadata',
2227 False,
2227 False,
2228 _(b'display the on disk meta data for the nodemap'),
2228 _(b'display the on disk meta data for the nodemap'),
2229 ),
2229 ),
2230 ],
2230 ],
2231 )
2231 )
2232 def debugnodemap(ui, repo, **opts):
2232 def debugnodemap(ui, repo, **opts):
2233 """write and inspect on disk nodemap"""
2233 """write and inspect on disk nodemap"""
2234 if opts['dump_new']:
2234 if opts['dump_new']:
2235 unfi = repo.unfiltered()
2235 unfi = repo.unfiltered()
2236 cl = unfi.changelog
2236 cl = unfi.changelog
2237 if util.safehasattr(cl.index, "nodemap_data_all"):
2237 if util.safehasattr(cl.index, "nodemap_data_all"):
2238 data = cl.index.nodemap_data_all()
2238 data = cl.index.nodemap_data_all()
2239 else:
2239 else:
2240 data = nodemap.persistent_data(cl.index)
2240 data = nodemap.persistent_data(cl.index)
2241 ui.write(data)
2241 ui.write(data)
2242 elif opts['dump_disk']:
2242 elif opts['dump_disk']:
2243 unfi = repo.unfiltered()
2243 unfi = repo.unfiltered()
2244 cl = unfi.changelog
2244 cl = unfi.changelog
2245 nm_data = nodemap.persisted_data(cl)
2245 nm_data = nodemap.persisted_data(cl)
2246 if nm_data is not None:
2246 if nm_data is not None:
2247 docket, data = nm_data
2247 docket, data = nm_data
2248 ui.write(data[:])
2248 ui.write(data[:])
2249 elif opts['check']:
2249 elif opts['check']:
2250 unfi = repo.unfiltered()
2250 unfi = repo.unfiltered()
2251 cl = unfi.changelog
2251 cl = unfi.changelog
2252 nm_data = nodemap.persisted_data(cl)
2252 nm_data = nodemap.persisted_data(cl)
2253 if nm_data is not None:
2253 if nm_data is not None:
2254 docket, data = nm_data
2254 docket, data = nm_data
2255 return nodemap.check_data(ui, cl.index, data)
2255 return nodemap.check_data(ui, cl.index, data)
2256 elif opts['metadata']:
2256 elif opts['metadata']:
2257 unfi = repo.unfiltered()
2257 unfi = repo.unfiltered()
2258 cl = unfi.changelog
2258 cl = unfi.changelog
2259 nm_data = nodemap.persisted_data(cl)
2259 nm_data = nodemap.persisted_data(cl)
2260 if nm_data is not None:
2260 if nm_data is not None:
2261 docket, data = nm_data
2261 docket, data = nm_data
2262 ui.write((b"uid: %s\n") % docket.uid)
2262 ui.write((b"uid: %s\n") % docket.uid)
2263 ui.write((b"tip-rev: %d\n") % docket.tip_rev)
2263 ui.write((b"tip-rev: %d\n") % docket.tip_rev)
2264 ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
2264 ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
2265 ui.write((b"data-length: %d\n") % docket.data_length)
2265 ui.write((b"data-length: %d\n") % docket.data_length)
2266 ui.write((b"data-unused: %d\n") % docket.data_unused)
2266 ui.write((b"data-unused: %d\n") % docket.data_unused)
2267 unused_perc = docket.data_unused * 100.0 / docket.data_length
2267 unused_perc = docket.data_unused * 100.0 / docket.data_length
2268 ui.write((b"data-unused: %2.3f%%\n") % unused_perc)
2268 ui.write((b"data-unused: %2.3f%%\n") % unused_perc)
2269
2269
2270
2270
2271 @command(
2271 @command(
2272 b'debugobsolete',
2272 b'debugobsolete',
2273 [
2273 [
2274 (b'', b'flags', 0, _(b'markers flag')),
2274 (b'', b'flags', 0, _(b'markers flag')),
2275 (
2275 (
2276 b'',
2276 b'',
2277 b'record-parents',
2277 b'record-parents',
2278 False,
2278 False,
2279 _(b'record parent information for the precursor'),
2279 _(b'record parent information for the precursor'),
2280 ),
2280 ),
2281 (b'r', b'rev', [], _(b'display markers relevant to REV')),
2281 (b'r', b'rev', [], _(b'display markers relevant to REV')),
2282 (
2282 (
2283 b'',
2283 b'',
2284 b'exclusive',
2284 b'exclusive',
2285 False,
2285 False,
2286 _(b'restrict display to markers only relevant to REV'),
2286 _(b'restrict display to markers only relevant to REV'),
2287 ),
2287 ),
2288 (b'', b'index', False, _(b'display index of the marker')),
2288 (b'', b'index', False, _(b'display index of the marker')),
2289 (b'', b'delete', [], _(b'delete markers specified by indices')),
2289 (b'', b'delete', [], _(b'delete markers specified by indices')),
2290 ]
2290 ]
2291 + cmdutil.commitopts2
2291 + cmdutil.commitopts2
2292 + cmdutil.formatteropts,
2292 + cmdutil.formatteropts,
2293 _(b'[OBSOLETED [REPLACEMENT ...]]'),
2293 _(b'[OBSOLETED [REPLACEMENT ...]]'),
2294 )
2294 )
2295 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
2295 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
2296 """create arbitrary obsolete marker
2296 """create arbitrary obsolete marker
2297
2297
2298 With no arguments, displays the list of obsolescence markers."""
2298 With no arguments, displays the list of obsolescence markers."""
2299
2299
2300 opts = pycompat.byteskwargs(opts)
2300 opts = pycompat.byteskwargs(opts)
2301
2301
2302 def parsenodeid(s):
2302 def parsenodeid(s):
2303 try:
2303 try:
2304 # We do not use revsingle/revrange functions here to accept
2304 # We do not use revsingle/revrange functions here to accept
2305 # arbitrary node identifiers, possibly not present in the
2305 # arbitrary node identifiers, possibly not present in the
2306 # local repository.
2306 # local repository.
2307 n = bin(s)
2307 n = bin(s)
2308 if len(n) != len(nullid):
2308 if len(n) != len(nullid):
2309 raise TypeError()
2309 raise TypeError()
2310 return n
2310 return n
2311 except TypeError:
2311 except TypeError:
2312 raise error.InputError(
2312 raise error.InputError(
2313 b'changeset references must be full hexadecimal '
2313 b'changeset references must be full hexadecimal '
2314 b'node identifiers'
2314 b'node identifiers'
2315 )
2315 )
2316
2316
2317 if opts.get(b'delete'):
2317 if opts.get(b'delete'):
2318 indices = []
2318 indices = []
2319 for v in opts.get(b'delete'):
2319 for v in opts.get(b'delete'):
2320 try:
2320 try:
2321 indices.append(int(v))
2321 indices.append(int(v))
2322 except ValueError:
2322 except ValueError:
2323 raise error.InputError(
2323 raise error.InputError(
2324 _(b'invalid index value: %r') % v,
2324 _(b'invalid index value: %r') % v,
2325 hint=_(b'use integers for indices'),
2325 hint=_(b'use integers for indices'),
2326 )
2326 )
2327
2327
2328 if repo.currenttransaction():
2328 if repo.currenttransaction():
2329 raise error.Abort(
2329 raise error.Abort(
2330 _(b'cannot delete obsmarkers in the middle of transaction.')
2330 _(b'cannot delete obsmarkers in the middle of transaction.')
2331 )
2331 )
2332
2332
2333 with repo.lock():
2333 with repo.lock():
2334 n = repair.deleteobsmarkers(repo.obsstore, indices)
2334 n = repair.deleteobsmarkers(repo.obsstore, indices)
2335 ui.write(_(b'deleted %i obsolescence markers\n') % n)
2335 ui.write(_(b'deleted %i obsolescence markers\n') % n)
2336
2336
2337 return
2337 return
2338
2338
2339 if precursor is not None:
2339 if precursor is not None:
2340 if opts[b'rev']:
2340 if opts[b'rev']:
2341 raise error.InputError(
2341 raise error.InputError(
2342 b'cannot select revision when creating marker'
2342 b'cannot select revision when creating marker'
2343 )
2343 )
2344 metadata = {}
2344 metadata = {}
2345 metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
2345 metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
2346 succs = tuple(parsenodeid(succ) for succ in successors)
2346 succs = tuple(parsenodeid(succ) for succ in successors)
2347 l = repo.lock()
2347 l = repo.lock()
2348 try:
2348 try:
2349 tr = repo.transaction(b'debugobsolete')
2349 tr = repo.transaction(b'debugobsolete')
2350 try:
2350 try:
2351 date = opts.get(b'date')
2351 date = opts.get(b'date')
2352 if date:
2352 if date:
2353 date = dateutil.parsedate(date)
2353 date = dateutil.parsedate(date)
2354 else:
2354 else:
2355 date = None
2355 date = None
2356 prec = parsenodeid(precursor)
2356 prec = parsenodeid(precursor)
2357 parents = None
2357 parents = None
2358 if opts[b'record_parents']:
2358 if opts[b'record_parents']:
2359 if prec not in repo.unfiltered():
2359 if prec not in repo.unfiltered():
2360 raise error.Abort(
2360 raise error.Abort(
2361 b'cannot used --record-parents on '
2361 b'cannot used --record-parents on '
2362 b'unknown changesets'
2362 b'unknown changesets'
2363 )
2363 )
2364 parents = repo.unfiltered()[prec].parents()
2364 parents = repo.unfiltered()[prec].parents()
2365 parents = tuple(p.node() for p in parents)
2365 parents = tuple(p.node() for p in parents)
2366 repo.obsstore.create(
2366 repo.obsstore.create(
2367 tr,
2367 tr,
2368 prec,
2368 prec,
2369 succs,
2369 succs,
2370 opts[b'flags'],
2370 opts[b'flags'],
2371 parents=parents,
2371 parents=parents,
2372 date=date,
2372 date=date,
2373 metadata=metadata,
2373 metadata=metadata,
2374 ui=ui,
2374 ui=ui,
2375 )
2375 )
2376 tr.close()
2376 tr.close()
2377 except ValueError as exc:
2377 except ValueError as exc:
2378 raise error.Abort(
2378 raise error.Abort(
2379 _(b'bad obsmarker input: %s') % pycompat.bytestr(exc)
2379 _(b'bad obsmarker input: %s') % pycompat.bytestr(exc)
2380 )
2380 )
2381 finally:
2381 finally:
2382 tr.release()
2382 tr.release()
2383 finally:
2383 finally:
2384 l.release()
2384 l.release()
2385 else:
2385 else:
2386 if opts[b'rev']:
2386 if opts[b'rev']:
2387 revs = scmutil.revrange(repo, opts[b'rev'])
2387 revs = scmutil.revrange(repo, opts[b'rev'])
2388 nodes = [repo[r].node() for r in revs]
2388 nodes = [repo[r].node() for r in revs]
2389 markers = list(
2389 markers = list(
2390 obsutil.getmarkers(
2390 obsutil.getmarkers(
2391 repo, nodes=nodes, exclusive=opts[b'exclusive']
2391 repo, nodes=nodes, exclusive=opts[b'exclusive']
2392 )
2392 )
2393 )
2393 )
2394 markers.sort(key=lambda x: x._data)
2394 markers.sort(key=lambda x: x._data)
2395 else:
2395 else:
2396 markers = obsutil.getmarkers(repo)
2396 markers = obsutil.getmarkers(repo)
2397
2397
2398 markerstoiter = markers
2398 markerstoiter = markers
2399 isrelevant = lambda m: True
2399 isrelevant = lambda m: True
2400 if opts.get(b'rev') and opts.get(b'index'):
2400 if opts.get(b'rev') and opts.get(b'index'):
2401 markerstoiter = obsutil.getmarkers(repo)
2401 markerstoiter = obsutil.getmarkers(repo)
2402 markerset = set(markers)
2402 markerset = set(markers)
2403 isrelevant = lambda m: m in markerset
2403 isrelevant = lambda m: m in markerset
2404
2404
2405 fm = ui.formatter(b'debugobsolete', opts)
2405 fm = ui.formatter(b'debugobsolete', opts)
2406 for i, m in enumerate(markerstoiter):
2406 for i, m in enumerate(markerstoiter):
2407 if not isrelevant(m):
2407 if not isrelevant(m):
2408 # marker can be irrelevant when we're iterating over a set
2408 # marker can be irrelevant when we're iterating over a set
2409 # of markers (markerstoiter) which is bigger than the set
2409 # of markers (markerstoiter) which is bigger than the set
2410 # of markers we want to display (markers)
2410 # of markers we want to display (markers)
2411 # this can happen if both --index and --rev options are
2411 # this can happen if both --index and --rev options are
2412 # provided and thus we need to iterate over all of the markers
2412 # provided and thus we need to iterate over all of the markers
2413 # to get the correct indices, but only display the ones that
2413 # to get the correct indices, but only display the ones that
2414 # are relevant to --rev value
2414 # are relevant to --rev value
2415 continue
2415 continue
2416 fm.startitem()
2416 fm.startitem()
2417 ind = i if opts.get(b'index') else None
2417 ind = i if opts.get(b'index') else None
2418 cmdutil.showmarker(fm, m, index=ind)
2418 cmdutil.showmarker(fm, m, index=ind)
2419 fm.end()
2419 fm.end()
2420
2420
2421
2421
2422 @command(
2422 @command(
2423 b'debugp1copies',
2423 b'debugp1copies',
2424 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2424 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2425 _(b'[-r REV]'),
2425 _(b'[-r REV]'),
2426 )
2426 )
2427 def debugp1copies(ui, repo, **opts):
2427 def debugp1copies(ui, repo, **opts):
2428 """dump copy information compared to p1"""
2428 """dump copy information compared to p1"""
2429
2429
2430 opts = pycompat.byteskwargs(opts)
2430 opts = pycompat.byteskwargs(opts)
2431 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2431 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2432 for dst, src in ctx.p1copies().items():
2432 for dst, src in ctx.p1copies().items():
2433 ui.write(b'%s -> %s\n' % (src, dst))
2433 ui.write(b'%s -> %s\n' % (src, dst))
2434
2434
2435
2435
2436 @command(
2436 @command(
2437 b'debugp2copies',
2437 b'debugp2copies',
2438 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2438 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2439 _(b'[-r REV]'),
2439 _(b'[-r REV]'),
2440 )
2440 )
2441 def debugp1copies(ui, repo, **opts):
2441 def debugp1copies(ui, repo, **opts):
2442 """dump copy information compared to p2"""
2442 """dump copy information compared to p2"""
2443
2443
2444 opts = pycompat.byteskwargs(opts)
2444 opts = pycompat.byteskwargs(opts)
2445 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2445 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2446 for dst, src in ctx.p2copies().items():
2446 for dst, src in ctx.p2copies().items():
2447 ui.write(b'%s -> %s\n' % (src, dst))
2447 ui.write(b'%s -> %s\n' % (src, dst))
2448
2448
2449
2449
2450 @command(
2450 @command(
2451 b'debugpathcomplete',
2451 b'debugpathcomplete',
2452 [
2452 [
2453 (b'f', b'full', None, _(b'complete an entire path')),
2453 (b'f', b'full', None, _(b'complete an entire path')),
2454 (b'n', b'normal', None, _(b'show only normal files')),
2454 (b'n', b'normal', None, _(b'show only normal files')),
2455 (b'a', b'added', None, _(b'show only added files')),
2455 (b'a', b'added', None, _(b'show only added files')),
2456 (b'r', b'removed', None, _(b'show only removed files')),
2456 (b'r', b'removed', None, _(b'show only removed files')),
2457 ],
2457 ],
2458 _(b'FILESPEC...'),
2458 _(b'FILESPEC...'),
2459 )
2459 )
2460 def debugpathcomplete(ui, repo, *specs, **opts):
2460 def debugpathcomplete(ui, repo, *specs, **opts):
2461 """complete part or all of a tracked path
2461 """complete part or all of a tracked path
2462
2462
2463 This command supports shells that offer path name completion. It
2463 This command supports shells that offer path name completion. It
2464 currently completes only files already known to the dirstate.
2464 currently completes only files already known to the dirstate.
2465
2465
2466 Completion extends only to the next path segment unless
2466 Completion extends only to the next path segment unless
2467 --full is specified, in which case entire paths are used."""
2467 --full is specified, in which case entire paths are used."""
2468
2468
2469 def complete(path, acceptable):
2469 def complete(path, acceptable):
2470 dirstate = repo.dirstate
2470 dirstate = repo.dirstate
2471 spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
2471 spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
2472 rootdir = repo.root + pycompat.ossep
2472 rootdir = repo.root + pycompat.ossep
2473 if spec != repo.root and not spec.startswith(rootdir):
2473 if spec != repo.root and not spec.startswith(rootdir):
2474 return [], []
2474 return [], []
2475 if os.path.isdir(spec):
2475 if os.path.isdir(spec):
2476 spec += b'/'
2476 spec += b'/'
2477 spec = spec[len(rootdir) :]
2477 spec = spec[len(rootdir) :]
2478 fixpaths = pycompat.ossep != b'/'
2478 fixpaths = pycompat.ossep != b'/'
2479 if fixpaths:
2479 if fixpaths:
2480 spec = spec.replace(pycompat.ossep, b'/')
2480 spec = spec.replace(pycompat.ossep, b'/')
2481 speclen = len(spec)
2481 speclen = len(spec)
2482 fullpaths = opts['full']
2482 fullpaths = opts['full']
2483 files, dirs = set(), set()
2483 files, dirs = set(), set()
2484 adddir, addfile = dirs.add, files.add
2484 adddir, addfile = dirs.add, files.add
2485 for f, st in pycompat.iteritems(dirstate):
2485 for f, st in pycompat.iteritems(dirstate):
2486 if f.startswith(spec) and st[0] in acceptable:
2486 if f.startswith(spec) and st[0] in acceptable:
2487 if fixpaths:
2487 if fixpaths:
2488 f = f.replace(b'/', pycompat.ossep)
2488 f = f.replace(b'/', pycompat.ossep)
2489 if fullpaths:
2489 if fullpaths:
2490 addfile(f)
2490 addfile(f)
2491 continue
2491 continue
2492 s = f.find(pycompat.ossep, speclen)
2492 s = f.find(pycompat.ossep, speclen)
2493 if s >= 0:
2493 if s >= 0:
2494 adddir(f[:s])
2494 adddir(f[:s])
2495 else:
2495 else:
2496 addfile(f)
2496 addfile(f)
2497 return files, dirs
2497 return files, dirs
2498
2498
2499 acceptable = b''
2499 acceptable = b''
2500 if opts['normal']:
2500 if opts['normal']:
2501 acceptable += b'nm'
2501 acceptable += b'nm'
2502 if opts['added']:
2502 if opts['added']:
2503 acceptable += b'a'
2503 acceptable += b'a'
2504 if opts['removed']:
2504 if opts['removed']:
2505 acceptable += b'r'
2505 acceptable += b'r'
2506 cwd = repo.getcwd()
2506 cwd = repo.getcwd()
2507 if not specs:
2507 if not specs:
2508 specs = [b'.']
2508 specs = [b'.']
2509
2509
2510 files, dirs = set(), set()
2510 files, dirs = set(), set()
2511 for spec in specs:
2511 for spec in specs:
2512 f, d = complete(spec, acceptable or b'nmar')
2512 f, d = complete(spec, acceptable or b'nmar')
2513 files.update(f)
2513 files.update(f)
2514 dirs.update(d)
2514 dirs.update(d)
2515 files.update(dirs)
2515 files.update(dirs)
2516 ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
2516 ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
2517 ui.write(b'\n')
2517 ui.write(b'\n')
2518
2518
2519
2519
2520 @command(
2520 @command(
2521 b'debugpathcopies',
2521 b'debugpathcopies',
2522 cmdutil.walkopts,
2522 cmdutil.walkopts,
2523 b'hg debugpathcopies REV1 REV2 [FILE]',
2523 b'hg debugpathcopies REV1 REV2 [FILE]',
2524 inferrepo=True,
2524 inferrepo=True,
2525 )
2525 )
2526 def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
2526 def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
2527 """show copies between two revisions"""
2527 """show copies between two revisions"""
2528 ctx1 = scmutil.revsingle(repo, rev1)
2528 ctx1 = scmutil.revsingle(repo, rev1)
2529 ctx2 = scmutil.revsingle(repo, rev2)
2529 ctx2 = scmutil.revsingle(repo, rev2)
2530 m = scmutil.match(ctx1, pats, opts)
2530 m = scmutil.match(ctx1, pats, opts)
2531 for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
2531 for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
2532 ui.write(b'%s -> %s\n' % (src, dst))
2532 ui.write(b'%s -> %s\n' % (src, dst))
2533
2533
2534
2534
2535 @command(b'debugpeer', [], _(b'PATH'), norepo=True)
2535 @command(b'debugpeer', [], _(b'PATH'), norepo=True)
2536 def debugpeer(ui, path):
2536 def debugpeer(ui, path):
2537 """establish a connection to a peer repository"""
2537 """establish a connection to a peer repository"""
2538 # Always enable peer request logging. Requires --debug to display
2538 # Always enable peer request logging. Requires --debug to display
2539 # though.
2539 # though.
2540 overrides = {
2540 overrides = {
2541 (b'devel', b'debug.peer-request'): True,
2541 (b'devel', b'debug.peer-request'): True,
2542 }
2542 }
2543
2543
2544 with ui.configoverride(overrides):
2544 with ui.configoverride(overrides):
2545 peer = hg.peer(ui, {}, path)
2545 peer = hg.peer(ui, {}, path)
2546
2546
2547 local = peer.local() is not None
2547 local = peer.local() is not None
2548 canpush = peer.canpush()
2548 canpush = peer.canpush()
2549
2549
2550 ui.write(_(b'url: %s\n') % peer.url())
2550 ui.write(_(b'url: %s\n') % peer.url())
2551 ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
2551 ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
2552 ui.write(_(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no')))
2552 ui.write(_(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no')))
2553
2553
2554
2554
2555 @command(
2555 @command(
2556 b'debugpickmergetool',
2556 b'debugpickmergetool',
2557 [
2557 [
2558 (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
2558 (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
2559 (b'', b'changedelete', None, _(b'emulate merging change and delete')),
2559 (b'', b'changedelete', None, _(b'emulate merging change and delete')),
2560 ]
2560 ]
2561 + cmdutil.walkopts
2561 + cmdutil.walkopts
2562 + cmdutil.mergetoolopts,
2562 + cmdutil.mergetoolopts,
2563 _(b'[PATTERN]...'),
2563 _(b'[PATTERN]...'),
2564 inferrepo=True,
2564 inferrepo=True,
2565 )
2565 )
2566 def debugpickmergetool(ui, repo, *pats, **opts):
2566 def debugpickmergetool(ui, repo, *pats, **opts):
2567 """examine which merge tool is chosen for specified file
2567 """examine which merge tool is chosen for specified file
2568
2568
2569 As described in :hg:`help merge-tools`, Mercurial examines
2569 As described in :hg:`help merge-tools`, Mercurial examines
2570 configurations below in this order to decide which merge tool is
2570 configurations below in this order to decide which merge tool is
2571 chosen for specified file.
2571 chosen for specified file.
2572
2572
2573 1. ``--tool`` option
2573 1. ``--tool`` option
2574 2. ``HGMERGE`` environment variable
2574 2. ``HGMERGE`` environment variable
2575 3. configurations in ``merge-patterns`` section
2575 3. configurations in ``merge-patterns`` section
2576 4. configuration of ``ui.merge``
2576 4. configuration of ``ui.merge``
2577 5. configurations in ``merge-tools`` section
2577 5. configurations in ``merge-tools`` section
2578 6. ``hgmerge`` tool (for historical reason only)
2578 6. ``hgmerge`` tool (for historical reason only)
2579 7. default tool for fallback (``:merge`` or ``:prompt``)
2579 7. default tool for fallback (``:merge`` or ``:prompt``)
2580
2580
2581 This command writes out examination result in the style below::
2581 This command writes out examination result in the style below::
2582
2582
2583 FILE = MERGETOOL
2583 FILE = MERGETOOL
2584
2584
2585 By default, all files known in the first parent context of the
2585 By default, all files known in the first parent context of the
2586 working directory are examined. Use file patterns and/or -I/-X
2586 working directory are examined. Use file patterns and/or -I/-X
2587 options to limit target files. -r/--rev is also useful to examine
2587 options to limit target files. -r/--rev is also useful to examine
2588 files in another context without actual updating to it.
2588 files in another context without actual updating to it.
2589
2589
2590 With --debug, this command shows warning messages while matching
2590 With --debug, this command shows warning messages while matching
2591 against ``merge-patterns`` and so on, too. It is recommended to
2591 against ``merge-patterns`` and so on, too. It is recommended to
2592 use this option with explicit file patterns and/or -I/-X options,
2592 use this option with explicit file patterns and/or -I/-X options,
2593 because this option increases amount of output per file according
2593 because this option increases amount of output per file according
2594 to configurations in hgrc.
2594 to configurations in hgrc.
2595
2595
2596 With -v/--verbose, this command shows configurations below at
2596 With -v/--verbose, this command shows configurations below at
2597 first (only if specified).
2597 first (only if specified).
2598
2598
2599 - ``--tool`` option
2599 - ``--tool`` option
2600 - ``HGMERGE`` environment variable
2600 - ``HGMERGE`` environment variable
2601 - configuration of ``ui.merge``
2601 - configuration of ``ui.merge``
2602
2602
2603 If merge tool is chosen before matching against
2603 If merge tool is chosen before matching against
2604 ``merge-patterns``, this command can't show any helpful
2604 ``merge-patterns``, this command can't show any helpful
2605 information, even with --debug. In such case, information above is
2605 information, even with --debug. In such case, information above is
2606 useful to know why a merge tool is chosen.
2606 useful to know why a merge tool is chosen.
2607 """
2607 """
2608 opts = pycompat.byteskwargs(opts)
2608 opts = pycompat.byteskwargs(opts)
2609 overrides = {}
2609 overrides = {}
2610 if opts[b'tool']:
2610 if opts[b'tool']:
2611 overrides[(b'ui', b'forcemerge')] = opts[b'tool']
2611 overrides[(b'ui', b'forcemerge')] = opts[b'tool']
2612 ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))
2612 ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))
2613
2613
2614 with ui.configoverride(overrides, b'debugmergepatterns'):
2614 with ui.configoverride(overrides, b'debugmergepatterns'):
2615 hgmerge = encoding.environ.get(b"HGMERGE")
2615 hgmerge = encoding.environ.get(b"HGMERGE")
2616 if hgmerge is not None:
2616 if hgmerge is not None:
2617 ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
2617 ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
2618 uimerge = ui.config(b"ui", b"merge")
2618 uimerge = ui.config(b"ui", b"merge")
2619 if uimerge:
2619 if uimerge:
2620 ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))
2620 ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))
2621
2621
2622 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2622 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2623 m = scmutil.match(ctx, pats, opts)
2623 m = scmutil.match(ctx, pats, opts)
2624 changedelete = opts[b'changedelete']
2624 changedelete = opts[b'changedelete']
2625 for path in ctx.walk(m):
2625 for path in ctx.walk(m):
2626 fctx = ctx[path]
2626 fctx = ctx[path]
2627 try:
2627 try:
2628 if not ui.debugflag:
2628 if not ui.debugflag:
2629 ui.pushbuffer(error=True)
2629 ui.pushbuffer(error=True)
2630 tool, toolpath = filemerge._picktool(
2630 tool, toolpath = filemerge._picktool(
2631 repo,
2631 repo,
2632 ui,
2632 ui,
2633 path,
2633 path,
2634 fctx.isbinary(),
2634 fctx.isbinary(),
2635 b'l' in fctx.flags(),
2635 b'l' in fctx.flags(),
2636 changedelete,
2636 changedelete,
2637 )
2637 )
2638 finally:
2638 finally:
2639 if not ui.debugflag:
2639 if not ui.debugflag:
2640 ui.popbuffer()
2640 ui.popbuffer()
2641 ui.write(b'%s = %s\n' % (path, tool))
2641 ui.write(b'%s = %s\n' % (path, tool))
2642
2642
2643
2643
2644 @command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
2644 @command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
2645 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
2645 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
2646 """access the pushkey key/value protocol
2646 """access the pushkey key/value protocol
2647
2647
2648 With two args, list the keys in the given namespace.
2648 With two args, list the keys in the given namespace.
2649
2649
2650 With five args, set a key to new if it currently is set to old.
2650 With five args, set a key to new if it currently is set to old.
2651 Reports success or failure.
2651 Reports success or failure.
2652 """
2652 """
2653
2653
2654 target = hg.peer(ui, {}, repopath)
2654 target = hg.peer(ui, {}, repopath)
2655 if keyinfo:
2655 if keyinfo:
2656 key, old, new = keyinfo
2656 key, old, new = keyinfo
2657 with target.commandexecutor() as e:
2657 with target.commandexecutor() as e:
2658 r = e.callcommand(
2658 r = e.callcommand(
2659 b'pushkey',
2659 b'pushkey',
2660 {
2660 {
2661 b'namespace': namespace,
2661 b'namespace': namespace,
2662 b'key': key,
2662 b'key': key,
2663 b'old': old,
2663 b'old': old,
2664 b'new': new,
2664 b'new': new,
2665 },
2665 },
2666 ).result()
2666 ).result()
2667
2667
2668 ui.status(pycompat.bytestr(r) + b'\n')
2668 ui.status(pycompat.bytestr(r) + b'\n')
2669 return not r
2669 return not r
2670 else:
2670 else:
2671 for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
2671 for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
2672 ui.write(
2672 ui.write(
2673 b"%s\t%s\n" % (stringutil.escapestr(k), stringutil.escapestr(v))
2673 b"%s\t%s\n" % (stringutil.escapestr(k), stringutil.escapestr(v))
2674 )
2674 )
2675
2675
2676
2676
2677 @command(b'debugpvec', [], _(b'A B'))
2677 @command(b'debugpvec', [], _(b'A B'))
2678 def debugpvec(ui, repo, a, b=None):
2678 def debugpvec(ui, repo, a, b=None):
2679 ca = scmutil.revsingle(repo, a)
2679 ca = scmutil.revsingle(repo, a)
2680 cb = scmutil.revsingle(repo, b)
2680 cb = scmutil.revsingle(repo, b)
2681 pa = pvec.ctxpvec(ca)
2681 pa = pvec.ctxpvec(ca)
2682 pb = pvec.ctxpvec(cb)
2682 pb = pvec.ctxpvec(cb)
2683 if pa == pb:
2683 if pa == pb:
2684 rel = b"="
2684 rel = b"="
2685 elif pa > pb:
2685 elif pa > pb:
2686 rel = b">"
2686 rel = b">"
2687 elif pa < pb:
2687 elif pa < pb:
2688 rel = b"<"
2688 rel = b"<"
2689 elif pa | pb:
2689 elif pa | pb:
2690 rel = b"|"
2690 rel = b"|"
2691 ui.write(_(b"a: %s\n") % pa)
2691 ui.write(_(b"a: %s\n") % pa)
2692 ui.write(_(b"b: %s\n") % pb)
2692 ui.write(_(b"b: %s\n") % pb)
2693 ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
2693 ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
2694 ui.write(
2694 ui.write(
2695 _(b"delta: %d hdist: %d distance: %d relation: %s\n")
2695 _(b"delta: %d hdist: %d distance: %d relation: %s\n")
2696 % (
2696 % (
2697 abs(pa._depth - pb._depth),
2697 abs(pa._depth - pb._depth),
2698 pvec._hamming(pa._vec, pb._vec),
2698 pvec._hamming(pa._vec, pb._vec),
2699 pa.distance(pb),
2699 pa.distance(pb),
2700 rel,
2700 rel,
2701 )
2701 )
2702 )
2702 )
2703
2703
2704
2704
@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        # NOTE: opts was not run through pycompat.byteskwargs here, so the
        # key is the native str 'minimal', not b'minimal'.
        if opts.get('minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            # files known to the target manifest but missing from the dirstate
            manifestonly = manifestfiles - dirstatefiles
            # dirstate entries absent from the manifest, excluding pending adds
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
            changedfiles = manifestonly | dsnotadded

        # changedfiles=None means "rebuild everything"
        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2752
2752
2753
2753
@command(b'debugrebuildfncache', [], b'')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file

    Thin wrapper: all the work happens in repair.rebuildfncache().
    """
    repair.rebuildfncache(ui, repo)
2758
2758
2759
2759
@command(
    b'debugrename',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    m = scmutil.match(ctx, pats, opts)
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        # renamed() returns (source path, source filenode) or False/None
        o = fctx.filelog().renamed(fctx.filenode())
        rel = repo.pathto(abs)
        if o:
            ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
        else:
            ui.write(_(b"%s not renamed\n") % rel)
2779
2779
2780
2780
@command(b'debugrequires|debugrequirements', [], b'')
def debugrequirements(ui, repo):
    """ print the current repo requirements """
    # Sorted for stable, diffable output.
    for r in sorted(repo.requirements):
        ui.write(b"%s\n" % r)
2786
2786
2787
2787
@command(
    b'debugrevlog',
    cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)

    if opts.get(b"dump"):
        # --dump: one raw line per revision, then exit.
        numrevs = len(r)
        ui.write(
            (
                b"# rev p1rev p2rev start   end deltastart base   p1   p2"
                b" rawsize totalsize compression heads chainlen\n"
            )
        )
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # maintain the running set of head revisions
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write(
                b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                b"%11d %5d %8d\n"
                % (
                    rev,
                    p1,
                    p2,
                    r.start(rev),
                    r.end(rev),
                    r.start(dbase),
                    r.start(cbase),
                    r.start(p1),
                    r.start(p2),
                    rs,
                    ts,
                    compression,
                    len(heads),
                    clen,
                )
            )
        return 0

    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append(b'inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append(b'generaldelta')
    if not flags:
        flags = [b'(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks the ways each "delta" is built
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about delta chain of each revs
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision, as [min, max, total] accumulators
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot size accumulators per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        # fold `size` into a [min, max, total] accumulator
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # full snapshot: starts a new delta chain
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, b'_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            # first byte identifies the compression engine
            chunktype = bytes(segment[0:1])
        else:
            chunktype = b'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    if numfull == 0:
        fullsize[2] = 0
    else:
        fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    basedfmtstr = b'%%%dd\n'
    basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        # integer column sized to the widest expected value
        return basedfmtstr % len(str(max))

    def pcfmtstr(max, padding=0):
        # integer column plus a percentage, with optional extra padding
        return basepcfmtstr % (len(str(max)), b' ' * padding)

    def pcfmt(value, total):
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.writenoi18n(b'format : %d\n' % format)
    ui.writenoi18n(b'flags  : %s\n' % b', '.join(flags))

    ui.write(b'\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.writenoi18n(b'revisions     : ' + fmt2 % numrevs)
    ui.writenoi18n(b'    merges    : ' + fmt % pcfmt(nummerges, numrevs))
    ui.writenoi18n(
        b'    normal    : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
    )
    ui.writenoi18n(b'revisions     : ' + fmt2 % numrevs)
    ui.writenoi18n(b'    empty     : ' + fmt % pcfmt(numempty, numrevs))
    ui.writenoi18n(
        b'                   text  : '
        + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b'                   delta : '
        + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b'    snapshot  : ' + fmt % pcfmt(numfull + numsemi, numrevs)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b'      lvl-%-3d :       ' % depth)
            + fmt % pcfmt(numsnapdepth[depth], numrevs)
        )
    ui.writenoi18n(b'    deltas    : ' + fmt % pcfmt(numdeltas, numrevs))
    ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
    ui.writenoi18n(
        b'    snapshot  : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b'      lvl-%-3d :       ' % depth)
            + fmt % pcfmt(snaptotal[depth], totalsize)
        )
    ui.writenoi18n(b'    deltas    : ' + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        # render a compression-marker byte as a readable row label
        if chunktype == b'empty':
            return b'    %s     : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return b'    0x%s (%s)  : ' % (hex(chunktype), chunktype)
        else:
            return b'    0x%s      : ' % hex(chunktype)

    ui.write(b'\n')
    ui.writenoi18n(b'chunks        : ' + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.writenoi18n(b'chunks size   : ' + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write(b'\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.writenoi18n(b'avg chain length  : ' + fmt % avgchainlen)
    ui.writenoi18n(b'max chain length  : ' + fmt % maxchainlen)
    ui.writenoi18n(b'max chain reach   : ' + fmt % maxchainspan)
    ui.writenoi18n(b'compression ratio : ' + fmt % compratio)

    if format > 0:
        ui.write(b'\n')
        ui.writenoi18n(
            b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
            % tuple(datasize)
        )
        ui.writenoi18n(
            b'full revision size (min/max/avg)     : %d / %d / %d\n'
            % tuple(fullsize)
        )
        ui.writenoi18n(
            b'inter-snapshot size (min/max/avg)    : %d / %d / %d\n'
            % tuple(semisize)
        )
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.writenoi18n(
                b'    level-%-3d (min/max/avg)          : %d / %d / %d\n'
                % ((depth,) + tuple(snapsizedepth[depth]))
            )
        ui.writenoi18n(
            b'delta size (min/max/avg)             : %d / %d / %d\n'
            % tuple(deltasize)
        )

    if numdeltas > 0:
        ui.write(b'\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.writenoi18n(
            b'deltas against prev  : ' + fmt % pcfmt(numprev, numdeltas)
        )
        if numprev > 0:
            ui.writenoi18n(
                b'    where prev = p1  : ' + fmt2 % pcfmt(nump1prev, numprev)
            )
            ui.writenoi18n(
                b'    where prev = p2  : ' + fmt2 % pcfmt(nump2prev, numprev)
            )
            ui.writenoi18n(
                b'    other            : ' + fmt2 % pcfmt(numoprev, numprev)
            )
        if gdelta:
            ui.writenoi18n(
                b'deltas against p1    : ' + fmt % pcfmt(nump1, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against p2    : ' + fmt % pcfmt(nump2, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
            )
3142
3142
3143
3143
@command(
    b'debugrevlogindex',
    cmdutil.debugrevlogopts
    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
    _(b'[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
    format = opts.get(b'format', 0)
    if format not in (0, 1):
        raise error.Abort(_(b"unknown format %d") % format)

    # full hashes with --debug, short ones otherwise
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        if ui.verbose:
            ui.writenoi18n(
                b"   rev    offset  length linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
        else:
            ui.writenoi18n(
                b"   rev linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
    elif format == 1:
        if ui.verbose:
            ui.writenoi18n(
                (
                    b"   rev flag   offset   length     size   link     p1"
                    b"     p2 %s\n"
                )
                % b"nodeid".rjust(idlen)
            )
        else:
            ui.writenoi18n(
                b"   rev flag     size   link     p1     p2 %s\n"
                % b"nodeid".rjust(idlen)
            )

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # broken entry: fall back to null parents so we keep dumping
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write(
                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
                    % (
                        i,
                        r.start(i),
                        r.length(i),
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
            else:
                ui.write(
                    b"% 6d % 7d %s %s %s\n"
                    % (
                        i,
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write(
                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.start(i),
                        r.length(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
            else:
                ui.write(
                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
3257
3257
3258
3258
3259 @command(
3259 @command(
3260 b'debugrevspec',
3260 b'debugrevspec',
3261 [
3261 [
3262 (
3262 (
3263 b'',
3263 b'',
3264 b'optimize',
3264 b'optimize',
3265 None,
3265 None,
3266 _(b'print parsed tree after optimizing (DEPRECATED)'),
3266 _(b'print parsed tree after optimizing (DEPRECATED)'),
3267 ),
3267 ),
3268 (
3268 (
3269 b'',
3269 b'',
3270 b'show-revs',
3270 b'show-revs',
3271 True,
3271 True,
3272 _(b'print list of result revisions (default)'),
3272 _(b'print list of result revisions (default)'),
3273 ),
3273 ),
3274 (
3274 (
3275 b's',
3275 b's',
3276 b'show-set',
3276 b'show-set',
3277 None,
3277 None,
3278 _(b'print internal representation of result set'),
3278 _(b'print internal representation of result set'),
3279 ),
3279 ),
3280 (
3280 (
3281 b'p',
3281 b'p',
3282 b'show-stage',
3282 b'show-stage',
3283 [],
3283 [],
3284 _(b'print parsed tree at the given stage'),
3284 _(b'print parsed tree at the given stage'),
3285 _(b'NAME'),
3285 _(b'NAME'),
3286 ),
3286 ),
3287 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3287 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3288 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3288 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3289 ],
3289 ],
3290 b'REVSPEC',
3290 b'REVSPEC',
3291 )
3291 )
3292 def debugrevspec(ui, repo, expr, **opts):
3292 def debugrevspec(ui, repo, expr, **opts):
3293 """parse and apply a revision specification
3293 """parse and apply a revision specification
3294
3294
3295 Use -p/--show-stage option to print the parsed tree at the given stages.
3295 Use -p/--show-stage option to print the parsed tree at the given stages.
3296 Use -p all to print tree at every stage.
3296 Use -p all to print tree at every stage.
3297
3297
3298 Use --no-show-revs option with -s or -p to print only the set
3298 Use --no-show-revs option with -s or -p to print only the set
3299 representation or the parsed tree respectively.
3299 representation or the parsed tree respectively.
3300
3300
3301 Use --verify-optimized to compare the optimized result with the unoptimized
3301 Use --verify-optimized to compare the optimized result with the unoptimized
3302 one. Returns 1 if the optimized result differs.
3302 one. Returns 1 if the optimized result differs.
3303 """
3303 """
3304 opts = pycompat.byteskwargs(opts)
3304 opts = pycompat.byteskwargs(opts)
3305 aliases = ui.configitems(b'revsetalias')
3305 aliases = ui.configitems(b'revsetalias')
3306 stages = [
3306 stages = [
3307 (b'parsed', lambda tree: tree),
3307 (b'parsed', lambda tree: tree),
3308 (
3308 (
3309 b'expanded',
3309 b'expanded',
3310 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3310 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3311 ),
3311 ),
3312 (b'concatenated', revsetlang.foldconcat),
3312 (b'concatenated', revsetlang.foldconcat),
3313 (b'analyzed', revsetlang.analyze),
3313 (b'analyzed', revsetlang.analyze),
3314 (b'optimized', revsetlang.optimize),
3314 (b'optimized', revsetlang.optimize),
3315 ]
3315 ]
3316 if opts[b'no_optimized']:
3316 if opts[b'no_optimized']:
3317 stages = stages[:-1]
3317 stages = stages[:-1]
3318 if opts[b'verify_optimized'] and opts[b'no_optimized']:
3318 if opts[b'verify_optimized'] and opts[b'no_optimized']:
3319 raise error.Abort(
3319 raise error.Abort(
3320 _(b'cannot use --verify-optimized with --no-optimized')
3320 _(b'cannot use --verify-optimized with --no-optimized')
3321 )
3321 )
3322 stagenames = {n for n, f in stages}
3322 stagenames = {n for n, f in stages}
3323
3323
3324 showalways = set()
3324 showalways = set()
3325 showchanged = set()
3325 showchanged = set()
3326 if ui.verbose and not opts[b'show_stage']:
3326 if ui.verbose and not opts[b'show_stage']:
3327 # show parsed tree by --verbose (deprecated)
3327 # show parsed tree by --verbose (deprecated)
3328 showalways.add(b'parsed')
3328 showalways.add(b'parsed')
3329 showchanged.update([b'expanded', b'concatenated'])
3329 showchanged.update([b'expanded', b'concatenated'])
3330 if opts[b'optimize']:
3330 if opts[b'optimize']:
3331 showalways.add(b'optimized')
3331 showalways.add(b'optimized')
3332 if opts[b'show_stage'] and opts[b'optimize']:
3332 if opts[b'show_stage'] and opts[b'optimize']:
3333 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3333 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3334 if opts[b'show_stage'] == [b'all']:
3334 if opts[b'show_stage'] == [b'all']:
3335 showalways.update(stagenames)
3335 showalways.update(stagenames)
3336 else:
3336 else:
3337 for n in opts[b'show_stage']:
3337 for n in opts[b'show_stage']:
3338 if n not in stagenames:
3338 if n not in stagenames:
3339 raise error.Abort(_(b'invalid stage name: %s') % n)
3339 raise error.Abort(_(b'invalid stage name: %s') % n)
3340 showalways.update(opts[b'show_stage'])
3340 showalways.update(opts[b'show_stage'])
3341
3341
3342 treebystage = {}
3342 treebystage = {}
3343 printedtree = None
3343 printedtree = None
3344 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3344 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3345 for n, f in stages:
3345 for n, f in stages:
3346 treebystage[n] = tree = f(tree)
3346 treebystage[n] = tree = f(tree)
3347 if n in showalways or (n in showchanged and tree != printedtree):
3347 if n in showalways or (n in showchanged and tree != printedtree):
3348 if opts[b'show_stage'] or n != b'parsed':
3348 if opts[b'show_stage'] or n != b'parsed':
3349 ui.write(b"* %s:\n" % n)
3349 ui.write(b"* %s:\n" % n)
3350 ui.write(revsetlang.prettyformat(tree), b"\n")
3350 ui.write(revsetlang.prettyformat(tree), b"\n")
3351 printedtree = tree
3351 printedtree = tree
3352
3352
3353 if opts[b'verify_optimized']:
3353 if opts[b'verify_optimized']:
3354 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3354 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3355 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3355 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3356 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3356 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3357 ui.writenoi18n(
3357 ui.writenoi18n(
3358 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3358 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3359 )
3359 )
3360 ui.writenoi18n(
3360 ui.writenoi18n(
3361 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3361 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3362 )
3362 )
3363 arevs = list(arevs)
3363 arevs = list(arevs)
3364 brevs = list(brevs)
3364 brevs = list(brevs)
3365 if arevs == brevs:
3365 if arevs == brevs:
3366 return 0
3366 return 0
3367 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3367 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3368 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3368 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3369 sm = difflib.SequenceMatcher(None, arevs, brevs)
3369 sm = difflib.SequenceMatcher(None, arevs, brevs)
3370 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3370 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3371 if tag in ('delete', 'replace'):
3371 if tag in ('delete', 'replace'):
3372 for c in arevs[alo:ahi]:
3372 for c in arevs[alo:ahi]:
3373 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3373 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3374 if tag in ('insert', 'replace'):
3374 if tag in ('insert', 'replace'):
3375 for c in brevs[blo:bhi]:
3375 for c in brevs[blo:bhi]:
3376 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3376 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3377 if tag == 'equal':
3377 if tag == 'equal':
3378 for c in arevs[alo:ahi]:
3378 for c in arevs[alo:ahi]:
3379 ui.write(b' %d\n' % c)
3379 ui.write(b' %d\n' % c)
3380 return 1
3380 return 1
3381
3381
3382 func = revset.makematcher(tree)
3382 func = revset.makematcher(tree)
3383 revs = func(repo)
3383 revs = func(repo)
3384 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3384 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3385 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3385 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3386 if not opts[b'show_revs']:
3386 if not opts[b'show_revs']:
3387 return
3387 return
3388 for c in revs:
3388 for c in revs:
3389 ui.write(b"%d\n" % c)
3389 ui.write(b"%d\n" % c)
3390
3390
3391
3391
@command(
    b'debugserve',
    [
        (
            b'',
            b'sshstdio',
            False,
            _(b'run an SSH server bound to process handles'),
        ),
        (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
        (b'', b'logiofile', b'', _(b'file to log server I/O to')),
    ],
    b'',
)
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    # Only the SSH-over-stdio transport is implemented for now.
    if not opts[b'sshstdio']:
        raise error.Abort(_(b'only --sshstdio is currently supported'))

    logfh = None

    # The two logging destinations are mutually exclusive.
    if opts[b'logiofd'] and opts[b'logiofile']:
        raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))

    if opts[b'logiofd']:
        # Ideally we would be line buffered. But line buffering in binary
        # mode isn't supported and emits a warning in Python 3.8+. Disabling
        # buffering could have performance impacts. But since this isn't
        # performance critical code, it should be fine.
        fd = int(opts[b'logiofd'])
        try:
            logfh = os.fdopen(fd, 'ab', 0)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # A pipe cannot seek, so append mode fails on py3; fall back
            # to plain write mode.
            logfh = os.fdopen(fd, 'wb', 0)
    elif opts[b'logiofile']:
        logfh = open(opts[b'logiofile'], b'ab', 0)

    server = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    server.serve_forever()
3440
3440
3441
3441
@command(b'debugsetparents', [], _(b'REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory (DANGEROUS)

    This command is not what you are looking for and should not be used. Using
    this command will most certainly result in slight corruption of the file
    level histories within your repository. DO NOT USE THIS COMMAND.

    The command updates the p1 and p2 fields in the dirstate, and does not
    touch anything else. This is useful for writing repository conversion
    tools, but should be used with extreme care. For example, neither the
    working directory nor the dirstate is updated, so file status may be
    incorrect after running this command. Only use it if you are one of the
    few people that deeply understand both conversion tools and file level
    histories. If you are reading this help, you are not one of those people
    (most of them sailed west from Mithlond anyway).

    So one last time DO NOT USE THIS COMMAND.

    Returns 0 on success.
    """

    # Resolve both arguments to changeset nodes; a missing REV2 defaults to
    # the null revision (i.e. no second parent).
    node1 = scmutil.revsingle(repo, rev1).node()
    node2 = scmutil.revsingle(repo, rev2, b'null').node()

    # Take the working-copy lock while rewriting dirstate parents.
    with repo.wlock():
        repo.setparents(node1, node2)
3469
3469
3470
3470
@command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugsidedata(ui, repo, file_, rev=None, **opts):
    """dump the side data for a cl/manifest/file revision

    Use --verbose to dump the sidedata content."""
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        # With -c/-m/--dir the only positional argument is the revision, so
        # shift FILE into REV; a second positional is an error.
        if rev is not None:
            # NOTE(review): the command name reported in these errors is
            # b'debugdata', not b'debugsidedata' — presumably copied from
            # the debugdata command; confirm whether this is intentional.
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        # Without -c/-m/--dir both FILE and REV are required.
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    # Unwrap to the underlying revlog when the storage object wraps one.
    r = getattr(r, '_revlog', r)
    try:
        sidedata = r.sidedata(r.lookup(rev))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
    if sidedata:
        # Print entries in deterministic (sorted-by-key) order; keys are
        # rendered in octal by the %04o format below.
        sidedata = list(sidedata.items())
        sidedata.sort()
        ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
        for key, value in sidedata:
            ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
            if ui.verbose:
                # --verbose additionally dumps the raw entry payload.
                ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3497
3497
3498
3498
@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    """test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    """
    # Chain building relies on the Windows certificate store APIs.
    if not pycompat.iswindows:
        raise error.Abort(
            _(b'certificate chain building is only possible on Windows')
        )

    if not source:
        # Without an explicit SOURCE we need a repo to resolve 'default'.
        if not repo:
            raise error.Abort(
                _(
                    b"there is no Mercurial repository here, and no "
                    b"server specified"
                )
            )
        source = b"default"

    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)

    # Map supported schemes to default ports; anything else is rejected.
    defaultport = {b'https': 443, b'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_(b"malformed port number in URL"))
    else:
        raise error.Abort(_(b"only https and ssh connections are supported"))

    from . import win32

    # Verification is disabled on purpose (CERT_NONE): the goal is to fetch
    # the peer's raw certificate so win32 can (re)build its chain, not to
    # validate the connection.
    # NOTE(review): ssl.wrap_socket() is deprecated upstream (removed in
    # Python 3.12) — presumably acceptable for the Pythons this file
    # supports; confirm before bumping the supported Python range.
    s = ssl.wrap_socket(
        socket.socket(),
        ssl_version=ssl.PROTOCOL_TLS,
        cert_reqs=ssl.CERT_NONE,
        ca_certs=None,
    )

    try:
        s.connect(addr)
        # binary_form=True: DER-encoded peer certificate for the win32 APIs.
        cert = s.getpeercert(True)

        ui.status(_(b'checking the certificate chain for %s\n') % url.host)

        # First probe without building, so we only contact Windows Update
        # when the chain is actually incomplete.
        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_(b'certificate chain is incomplete, updating... '))

            # Second call (build=True by default) attempts the repair.
            if not win32.checkcertificatechain(cert):
                ui.status(_(b'failed.\n'))
            else:
                ui.status(_(b'done.\n'))
        else:
            ui.status(_(b'full certificate chain is available\n'))
    finally:
        s.close()
3568
3568
3569
3569
@command(
    b"debugbackupbundle",
    [
        (
            b"",
            b"recover",
            b"",
            b"brings the specified changeset back into the repository",
        )
    ]
    + cmdutil.logopts,
    _(b"hg debugbackupbundle [--recover HASH]"),
)
def debugbackupbundle(ui, repo, *pats, **opts):
    """lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in each
    backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for that
    backup.
    """
    # Collect all strip backup bundles, newest first.
    backups = list(
        filter(
            os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
        )
    )
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    opts = pycompat.byteskwargs(opts)
    # Neutralize options that getremotechanges/log machinery would read.
    opts[b"bundle"] = b""
    opts[b"force"] = None
    limit = logcmdutil.getlimit(opts)

    def display(other, chlist, displayer):
        # Show up to `limit` changesets from one bundle, honoring the
        # --newest-first and --no-merges log options.
        if opts.get(b"newest_first"):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [True for p in other.changelog.parents(n) if p != nullid]
            if opts.get(b"no_merges") and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get(b"recover")
    if recovernode:
        # Nothing to do if the requested changeset is already present.
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_(b"%s already exists in the repo\n") % recovernode)
            return
    elif backups:
        msg = _(
            b"Recover changesets using: hg debugbackupbundle --recover "
            b"<changeset hash>\n\nAvailable backup changesets:"
        )
        ui.status(msg, label=b"status.removed")
    else:
        ui.status(_(b"no backup changesets found\n"))
        return

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = ui.expandpath(os.path.relpath(backup, encoding.getcwd()))
        source, branches = hg.parseurl(source, opts.get(b"branch"))
        try:
            other = hg.peer(repo, opts, source)
        except error.LookupError as ex:
            # Bundle references a parent revision we no longer have; warn
            # and move on to the next backup instead of aborting.
            msg = _(b"\nwarning: unable to open bundle %s") % source
            hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg, hint=hint)
            continue
        revs, checkout = hg.addbranchrevs(
            repo, other, branches, opts.get(b"rev")
        )

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        # Silence the incoming-changes computation; restore verbosity after.
        quiet = ui.quiet
        try:
            ui.quiet = True
            other, chlist, cleanupfn = bundlerepo.getremotechanges(
                ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
            )
        except error.LookupError:
            continue
        finally:
            ui.quiet = quiet

        try:
            if not chlist:
                continue
            if recovernode:
                with repo.lock(), repo.transaction(b"unbundle") as tr:
                    # Only unbundle if this particular backup contains the
                    # requested changeset; stop after the first match.
                    if scmutil.isrevsymbol(other, recovernode):
                        ui.status(_(b"Unbundling %s\n") % (recovernode))
                        f = hg.openpath(ui, source)
                        gen = exchange.readbundle(ui, f, source)
                        if isinstance(gen, bundle2.unbundle20):
                            bundle2.applybundle(
                                repo,
                                gen,
                                tr,
                                source=b"unbundle",
                                url=b"bundle:" + source,
                            )
                        else:
                            # Legacy (bundle1) format.
                            gen.apply(repo, b"unbundle", b"bundle:" + source)
                        break
            else:
                # Listing mode: header line with the bundle's mtime, then
                # either the bundle path (--verbose) or its changesets.
                backupdate = encoding.strtolocal(
                    time.strftime(
                        "%a %H:%M, %Y-%m-%d",
                        time.localtime(os.path.getmtime(source)),
                    )
                )
                ui.status(b"\n%s\n" % (backupdate.ljust(50)))
                if ui.verbose:
                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
                else:
                    opts[
                        b"template"
                    ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
                    displayer = logcmdutil.changesetdisplayer(
                        ui, other, opts, False
                    )
                    display(other, chlist, displayer)
                    displayer.close()
        finally:
            # Always drop the temporary bundle repo resources.
            cleanupfn()
3705
3705
3706
3706
@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    # Dump the subrepository state of a revision: one block per path,
    # sorted by path for deterministic output.
    ctx = scmutil.revsingle(repo, rev, None)
    for k, v in sorted(ctx.substate.items()):
        # v is a substate tuple; v[0] and v[1] are printed below —
        # presumably (source, revision, ...); confirm against context.substate.
        ui.writenoi18n(b'path %s\n' % k)
        ui.writenoi18n(b' source %s\n' % v[0])
        ui.writenoi18n(b' revision %s\n' % v[1])
3718
3718
3719
3719
@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    # Formatting helpers: changeset context -> bytes, node -> short hex.
    ctx2str = bytes
    node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % ctx2str(ctx))
        # One output line per successors set of this changeset; a pruned
        # changeset yields no sets, so nothing is printed below its header.
        for succsset in obsutil.successorssets(
            repo, ctx.node(), closest=opts['closest'], cache=cache
        ):
            if succsset:
                # First successor after the separator, remaining ones
                # space-joined on the same line.
                ui.write(b' ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(b' ')
                    ui.write(node2str(node))
            ui.write(b'\n')
3774
3774
3775
3775
@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    # Read-only view of the fnodes cache: never compute missing entries,
    # just report what is (or is not) stored for every revision.
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    for rev in repo:
        node = repo[rev].node()
        fnode = cache.getfnode(node, computemissing=False)
        if fnode:
            fnodedisplay = hex(fnode)
        else:
            fnodedisplay = b'missing/invalid'
        ui.write(b'%d %s %s\n' % (rev, hex(node), fnodedisplay))
3784 ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))
3785
3785
3786
3786
@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    # Resolve requested revisions up front; a generic template needs none,
    # but -r without a repository is a hard error.
    revs = None
    if opts['rev']:
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = scmutil.revrange(repo, opts['rev'])

    # Turn -D KEY=VALUE definitions into template properties. The key
    # b'ui' is reserved and an empty key is meaningless, hence rejected.
    props = {}
    for definition in opts['define']:
        try:
            key, value = (part.strip() for part in definition.split(b'=', 1))
            if not key or key == b'ui':
                raise ValueError
            props[key] = value
        except ValueError:
            raise error.Abort(
                _(b'malformed keyword definition: %s') % definition
            )

    # With --verbose, show the parse tree and, when aliases changed it,
    # the alias-expanded tree as well.
    if ui.verbose:
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
            )

    if revs is None:
        # Generic template: render once against the default resources.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested changeset.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        for rev in revs:
            displayer.show(repo[rev], **pycompat.strkwargs(props))
        displayer.close()
3850
3850
3851
3851
@command(
    b'debuguigetpass',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    # ui.getpass() may return None; substitute a marker so the output is
    # always printable.
    response = ui.getpass(prompt)
    if response is None:
        response = b"<default response>"
    ui.writenoi18n(b'response: %s\n' % response)
3866
3866
3867
3867
@command(
    b'debuguiprompt',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    # Echo the user's answer straight back for test inspection.
    ui.writenoi18n(b'response: %s\n' % ui.prompt(prompt))
3880
3880
3881
3881
@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Take both the working-copy and store locks before touching caches,
    # then ask the repo to rebuild everything it knows how to cache.
    with repo.wlock(), repo.lock():
        repo.updatecaches(full=True)
3887
3887
3888
3888
@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
        (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlog will be upgraded. You can restrict this using flag
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlog but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    * `--filelogs`: optimize the filelogs only
    * `--no-changelog --no-manifest --no-filelogs`: skip all filelog optimisation
    """
    # The upgrade machinery expects a *set* of optimization names (the old
    # list-based names are no longer supported), while the command line hands
    # us a possibly-duplicated list. Normalize here; `optimize or []` also
    # keeps the declared default of None from crashing set() when this
    # function is invoked directly rather than through the CLI.
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=set(optimize or []), backup=backup, **opts
    )
3938
3938
3939
3939
@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    # Optionally normalize OS path separators to '/' for display.
    # (Pass util.normpath directly instead of wrapping it in a lambda.)
    f = lambda fn: fn
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        f = util.normpath
    # Pad the two path columns to the widest absolute and repo-relative
    # path. Use generator expressions (no list materialization) and avoid
    # shadowing the builtin `abs`, which the original code did.
    fmt = b'f %%-%ds %%-%ds %%s' % (
        max(len(path) for path in items),
        max(len(repo.pathto(path)) for path in items),
    )
    for path in items:
        line = fmt % (
            path,
            f(repo.pathto(path)),
            b'exact' if m.exact(path) else b'',
        )
        ui.write(b"%s\n" % line.rstrip())
3966
3966
3967
3967
@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    ctx = scmutil.revsingle(repo, rev)
    for entry in obsutil.whyunstable(repo, ctx):
        # Divergent nodes, when present, are rendered as trailing
        # "hex (phase)" pairs followed by a separating space.
        dnodes = b''
        divergent = entry.get(b'divergentnodes')
        if divergent:
            pairs = [
                b'%s (%s)' % (c.hex(), c.phasestr()) for c in divergent
            ]
            dnodes = b' '.join(pairs) + b' '
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )
3985
3985
3986
3986
@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    """exercise the wire protocol argument passing of a peer."""
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    # Strip the generic remote options; only command-specific values are
    # forwarded over the wire.
    for remoteopt in cmdutil.remoteopts:
        del opts[remoteopt[1]]
    # Keep only options the user actually supplied (truthy values).
    args = {k: v for k, v in pycompat.iteritems(opts) if v}
    args = pycompat.strkwargs(args)
    # run twice to check that we don't mess up the stream for the next command
    res1 = repo.debugwireargs(*vals, **args)
    res2 = repo.debugwireargs(*vals, **args)
    ui.write(b"%s\n" % res1)
    if res1 != res2:
        ui.warn(b"%s\n" % res2)
4014
4014
4015
4015
4016 def _parsewirelangblocks(fh):
4016 def _parsewirelangblocks(fh):
4017 activeaction = None
4017 activeaction = None
4018 blocklines = []
4018 blocklines = []
4019 lastindent = 0
4019 lastindent = 0
4020
4020
4021 for line in fh:
4021 for line in fh:
4022 line = line.rstrip()
4022 line = line.rstrip()
4023 if not line:
4023 if not line:
4024 continue
4024 continue
4025
4025
4026 if line.startswith(b'#'):
4026 if line.startswith(b'#'):
4027 continue
4027 continue
4028
4028
4029 if not line.startswith(b' '):
4029 if not line.startswith(b' '):
4030 # New block. Flush previous one.
4030 # New block. Flush previous one.
4031 if activeaction:
4031 if activeaction:
4032 yield activeaction, blocklines
4032 yield activeaction, blocklines
4033
4033
4034 activeaction = line
4034 activeaction = line
4035 blocklines = []
4035 blocklines = []
4036 lastindent = 0
4036 lastindent = 0
4037 continue
4037 continue
4038
4038
4039 # Else we start with an indent.
4039 # Else we start with an indent.
4040
4040
4041 if not activeaction:
4041 if not activeaction:
4042 raise error.Abort(_(b'indented line outside of block'))
4042 raise error.Abort(_(b'indented line outside of block'))
4043
4043
4044 indent = len(line) - len(line.lstrip())
4044 indent = len(line) - len(line.lstrip())
4045
4045
4046 # If this line is indented more than the last line, concatenate it.
4046 # If this line is indented more than the last line, concatenate it.
4047 if indent > lastindent and blocklines:
4047 if indent > lastindent and blocklines:
4048 blocklines[-1] += line.lstrip()
4048 blocklines[-1] += line.lstrip()
4049 else:
4049 else:
4050 blocklines.append(line)
4050 blocklines.append(line)
4051 lastindent = indent
4051 lastindent = indent
4052
4052
4053 # Flush last block.
4053 # Flush last block.
4054 if activeaction:
4054 if activeaction:
4055 yield activeaction, blocklines
4055 yield activeaction, blocklines
4056
4056
4057
4057
4058 @command(
4058 @command(
4059 b'debugwireproto',
4059 b'debugwireproto',
4060 [
4060 [
4061 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
4061 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
4062 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
4062 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
4063 (
4063 (
4064 b'',
4064 b'',
4065 b'noreadstderr',
4065 b'noreadstderr',
4066 False,
4066 False,
4067 _(b'do not read from stderr of the remote'),
4067 _(b'do not read from stderr of the remote'),
4068 ),
4068 ),
4069 (
4069 (
4070 b'',
4070 b'',
4071 b'nologhandshake',
4071 b'nologhandshake',
4072 False,
4072 False,
4073 _(b'do not log I/O related to the peer handshake'),
4073 _(b'do not log I/O related to the peer handshake'),
4074 ),
4074 ),
4075 ]
4075 ]
4076 + cmdutil.remoteopts,
4076 + cmdutil.remoteopts,
4077 _(b'[PATH]'),
4077 _(b'[PATH]'),
4078 optionalrepo=True,
4078 optionalrepo=True,
4079 )
4079 )
4080 def debugwireproto(ui, repo, path=None, **opts):
4080 def debugwireproto(ui, repo, path=None, **opts):
4081 """send wire protocol commands to a server
4081 """send wire protocol commands to a server
4082
4082
4083 This command can be used to issue wire protocol commands to remote
4083 This command can be used to issue wire protocol commands to remote
4084 peers and to debug the raw data being exchanged.
4084 peers and to debug the raw data being exchanged.
4085
4085
4086 ``--localssh`` will start an SSH server against the current repository
4086 ``--localssh`` will start an SSH server against the current repository
4087 and connect to that. By default, the connection will perform a handshake
4087 and connect to that. By default, the connection will perform a handshake
4088 and establish an appropriate peer instance.
4088 and establish an appropriate peer instance.
4089
4089
4090 ``--peer`` can be used to bypass the handshake protocol and construct a
4090 ``--peer`` can be used to bypass the handshake protocol and construct a
4091 peer instance using the specified class type. Valid values are ``raw``,
4091 peer instance using the specified class type. Valid values are ``raw``,
4092 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
4092 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
4093 raw data payloads and don't support higher-level command actions.
4093 raw data payloads and don't support higher-level command actions.
4094
4094
4095 ``--noreadstderr`` can be used to disable automatic reading from stderr
4095 ``--noreadstderr`` can be used to disable automatic reading from stderr
4096 of the peer (for SSH connections only). Disabling automatic reading of
4096 of the peer (for SSH connections only). Disabling automatic reading of
4097 stderr is useful for making output more deterministic.
4097 stderr is useful for making output more deterministic.
4098
4098
4099 Commands are issued via a mini language which is specified via stdin.
4099 Commands are issued via a mini language which is specified via stdin.
4100 The language consists of individual actions to perform. An action is
4100 The language consists of individual actions to perform. An action is
4101 defined by a block. A block is defined as a line with no leading
4101 defined by a block. A block is defined as a line with no leading
4102 space followed by 0 or more lines with leading space. Blocks are
4102 space followed by 0 or more lines with leading space. Blocks are
4103 effectively a high-level command with additional metadata.
4103 effectively a high-level command with additional metadata.
4104
4104
4105 Lines beginning with ``#`` are ignored.
4105 Lines beginning with ``#`` are ignored.
4106
4106
4107 The following sections denote available actions.
4107 The following sections denote available actions.
4108
4108
4109 raw
4109 raw
4110 ---
4110 ---
4111
4111
4112 Send raw data to the server.
4112 Send raw data to the server.
4113
4113
4114 The block payload contains the raw data to send as one atomic send
4114 The block payload contains the raw data to send as one atomic send
4115 operation. The data may not actually be delivered in a single system
4115 operation. The data may not actually be delivered in a single system
4116 call: it depends on the abilities of the transport being used.
4116 call: it depends on the abilities of the transport being used.
4117
4117
4118 Each line in the block is de-indented and concatenated. Then, that
4118 Each line in the block is de-indented and concatenated. Then, that
4119 value is evaluated as a Python b'' literal. This allows the use of
4119 value is evaluated as a Python b'' literal. This allows the use of
4120 backslash escaping, etc.
4120 backslash escaping, etc.
4121
4121
4122 raw+
4122 raw+
4123 ----
4123 ----
4124
4124
4125 Behaves like ``raw`` except flushes output afterwards.
4125 Behaves like ``raw`` except flushes output afterwards.
4126
4126
4127 command <X>
4127 command <X>
4128 -----------
4128 -----------
4129
4129
4130 Send a request to run a named command, whose name follows the ``command``
4130 Send a request to run a named command, whose name follows the ``command``
4131 string.
4131 string.
4132
4132
4133 Arguments to the command are defined as lines in this block. The format of
4133 Arguments to the command are defined as lines in this block. The format of
4134 each line is ``<key> <value>``. e.g.::
4134 each line is ``<key> <value>``. e.g.::
4135
4135
4136 command listkeys
4136 command listkeys
4137 namespace bookmarks
4137 namespace bookmarks
4138
4138
4139 If the value begins with ``eval:``, it will be interpreted as a Python
4139 If the value begins with ``eval:``, it will be interpreted as a Python
4140 literal expression. Otherwise values are interpreted as Python b'' literals.
4140 literal expression. Otherwise values are interpreted as Python b'' literals.
4141 This allows sending complex types and encoding special byte sequences via
4141 This allows sending complex types and encoding special byte sequences via
4142 backslash escaping.
4142 backslash escaping.
4143
4143
4144 The following arguments have special meaning:
4144 The following arguments have special meaning:
4145
4145
4146 ``PUSHFILE``
4146 ``PUSHFILE``
4147 When defined, the *push* mechanism of the peer will be used instead
4147 When defined, the *push* mechanism of the peer will be used instead
4148 of the static request-response mechanism and the content of the
4148 of the static request-response mechanism and the content of the
4149 file specified in the value of this argument will be sent as the
4149 file specified in the value of this argument will be sent as the
4150 command payload.
4150 command payload.
4151
4151
4152 This can be used to submit a local bundle file to the remote.
4152 This can be used to submit a local bundle file to the remote.
4153
4153
4154 batchbegin
4154 batchbegin
4155 ----------
4155 ----------
4156
4156
4157 Instruct the peer to begin a batched send.
4157 Instruct the peer to begin a batched send.
4158
4158
4159 All ``command`` blocks are queued for execution until the next
4159 All ``command`` blocks are queued for execution until the next
4160 ``batchsubmit`` block.
4160 ``batchsubmit`` block.
4161
4161
4162 batchsubmit
4162 batchsubmit
4163 -----------
4163 -----------
4164
4164
4165 Submit previously queued ``command`` blocks as a batch request.
4165 Submit previously queued ``command`` blocks as a batch request.
4166
4166
4167 This action MUST be paired with a ``batchbegin`` action.
4167 This action MUST be paired with a ``batchbegin`` action.
4168
4168
4169 httprequest <method> <path>
4169 httprequest <method> <path>
4170 ---------------------------
4170 ---------------------------
4171
4171
4172 (HTTP peer only)
4172 (HTTP peer only)
4173
4173
4174 Send an HTTP request to the peer.
4174 Send an HTTP request to the peer.
4175
4175
4176 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
4176 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
4177
4177
4178 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
4178 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
4179 headers to add to the request. e.g. ``Accept: foo``.
4179 headers to add to the request. e.g. ``Accept: foo``.
4180
4180
4181 The following arguments are special:
4181 The following arguments are special:
4182
4182
4183 ``BODYFILE``
4183 ``BODYFILE``
4184 The content of the file defined as the value to this argument will be
4184 The content of the file defined as the value to this argument will be
4185 transferred verbatim as the HTTP request body.
4185 transferred verbatim as the HTTP request body.
4186
4186
4187 ``frame <type> <flags> <payload>``
4187 ``frame <type> <flags> <payload>``
4188 Send a unified protocol frame as part of the request body.
4188 Send a unified protocol frame as part of the request body.
4189
4189
4190 All frames will be collected and sent as the body to the HTTP
4190 All frames will be collected and sent as the body to the HTTP
4191 request.
4191 request.
4192
4192
4193 close
4193 close
4194 -----
4194 -----
4195
4195
4196 Close the connection to the server.
4196 Close the connection to the server.
4197
4197
4198 flush
4198 flush
4199 -----
4199 -----
4200
4200
4201 Flush data written to the server.
4201 Flush data written to the server.
4202
4202
4203 readavailable
4203 readavailable
4204 -------------
4204 -------------
4205
4205
4206 Close the write end of the connection and read all available data from
4206 Close the write end of the connection and read all available data from
4207 the server.
4207 the server.
4208
4208
4209 If the connection to the server encompasses multiple pipes, we poll both
4209 If the connection to the server encompasses multiple pipes, we poll both
4210 pipes and read available data.
4210 pipes and read available data.
4211
4211
4212 readline
4212 readline
4213 --------
4213 --------
4214
4214
4215 Read a line of output from the server. If there are multiple output
4215 Read a line of output from the server. If there are multiple output
4216 pipes, reads only the main pipe.
4216 pipes, reads only the main pipe.
4217
4217
4218 ereadline
4218 ereadline
4219 ---------
4219 ---------
4220
4220
4221 Like ``readline``, but read from the stderr pipe, if available.
4221 Like ``readline``, but read from the stderr pipe, if available.
4222
4222
4223 read <X>
4223 read <X>
4224 --------
4224 --------
4225
4225
4226 ``read()`` N bytes from the server's main output pipe.
4226 ``read()`` N bytes from the server's main output pipe.
4227
4227
4228 eread <X>
4228 eread <X>
4229 ---------
4229 ---------
4230
4230
4231 ``read()`` N bytes from the server's stderr pipe, if available.
4231 ``read()`` N bytes from the server's stderr pipe, if available.
4232
4232
4233 Specifying Unified Frame-Based Protocol Frames
4233 Specifying Unified Frame-Based Protocol Frames
4234 ----------------------------------------------
4234 ----------------------------------------------
4235
4235
4236 It is possible to emit a *Unified Frame-Based Protocol* by using special
4236 It is possible to emit a *Unified Frame-Based Protocol* by using special
4237 syntax.
4237 syntax.
4238
4238
4239 A frame is composed as a type, flags, and payload. These can be parsed
4239 A frame is composed as a type, flags, and payload. These can be parsed
4240 from a string of the form:
4240 from a string of the form:
4241
4241
4242 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
4242 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
4243
4243
4244 ``request-id`` and ``stream-id`` are integers defining the request and
4244 ``request-id`` and ``stream-id`` are integers defining the request and
4245 stream identifiers.
4245 stream identifiers.
4246
4246
4247 ``type`` can be an integer value for the frame type or the string name
4247 ``type`` can be an integer value for the frame type or the string name
4248 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
4248 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
4249 ``command-name``.
4249 ``command-name``.
4250
4250
4251 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
4251 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
4252 components. Each component (and there can be just one) can be an integer
4252 components. Each component (and there can be just one) can be an integer
4253 or a flag name for stream flags or frame flags, respectively. Values are
4253 or a flag name for stream flags or frame flags, respectively. Values are
4254 resolved to integers and then bitwise OR'd together.
4254 resolved to integers and then bitwise OR'd together.
4255
4255
4256 ``payload`` represents the raw frame payload. If it begins with
4256 ``payload`` represents the raw frame payload. If it begins with
4257 ``cbor:``, the following string is evaluated as Python code and the
4257 ``cbor:``, the following string is evaluated as Python code and the
4258 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
4258 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
4259 as a Python byte string literal.
4259 as a Python byte string literal.
4260 """
4260 """
4261 opts = pycompat.byteskwargs(opts)
4261 opts = pycompat.byteskwargs(opts)
4262
4262
4263 if opts[b'localssh'] and not repo:
4263 if opts[b'localssh'] and not repo:
4264 raise error.Abort(_(b'--localssh requires a repository'))
4264 raise error.Abort(_(b'--localssh requires a repository'))
4265
4265
4266 if opts[b'peer'] and opts[b'peer'] not in (
4266 if opts[b'peer'] and opts[b'peer'] not in (
4267 b'raw',
4267 b'raw',
4268 b'http2',
4268 b'http2',
4269 b'ssh1',
4269 b'ssh1',
4270 b'ssh2',
4270 b'ssh2',
4271 ):
4271 ):
4272 raise error.Abort(
4272 raise error.Abort(
4273 _(b'invalid value for --peer'),
4273 _(b'invalid value for --peer'),
4274 hint=_(b'valid values are "raw", "ssh1", and "ssh2"'),
4274 hint=_(b'valid values are "raw", "ssh1", and "ssh2"'),
4275 )
4275 )
4276
4276
4277 if path and opts[b'localssh']:
4277 if path and opts[b'localssh']:
4278 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4278 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4279
4279
4280 if ui.interactive():
4280 if ui.interactive():
4281 ui.write(_(b'(waiting for commands on stdin)\n'))
4281 ui.write(_(b'(waiting for commands on stdin)\n'))
4282
4282
4283 blocks = list(_parsewirelangblocks(ui.fin))
4283 blocks = list(_parsewirelangblocks(ui.fin))
4284
4284
4285 proc = None
4285 proc = None
4286 stdin = None
4286 stdin = None
4287 stdout = None
4287 stdout = None
4288 stderr = None
4288 stderr = None
4289 opener = None
4289 opener = None
4290
4290
4291 if opts[b'localssh']:
4291 if opts[b'localssh']:
4292 # We start the SSH server in its own process so there is process
4292 # We start the SSH server in its own process so there is process
4293 # separation. This prevents a whole class of potential bugs around
4293 # separation. This prevents a whole class of potential bugs around
4294 # shared state from interfering with server operation.
4294 # shared state from interfering with server operation.
4295 args = procutil.hgcmd() + [
4295 args = procutil.hgcmd() + [
4296 b'-R',
4296 b'-R',
4297 repo.root,
4297 repo.root,
4298 b'debugserve',
4298 b'debugserve',
4299 b'--sshstdio',
4299 b'--sshstdio',
4300 ]
4300 ]
4301 proc = subprocess.Popen(
4301 proc = subprocess.Popen(
4302 pycompat.rapply(procutil.tonativestr, args),
4302 pycompat.rapply(procutil.tonativestr, args),
4303 stdin=subprocess.PIPE,
4303 stdin=subprocess.PIPE,
4304 stdout=subprocess.PIPE,
4304 stdout=subprocess.PIPE,
4305 stderr=subprocess.PIPE,
4305 stderr=subprocess.PIPE,
4306 bufsize=0,
4306 bufsize=0,
4307 )
4307 )
4308
4308
4309 stdin = proc.stdin
4309 stdin = proc.stdin
4310 stdout = proc.stdout
4310 stdout = proc.stdout
4311 stderr = proc.stderr
4311 stderr = proc.stderr
4312
4312
4313 # We turn the pipes into observers so we can log I/O.
4313 # We turn the pipes into observers so we can log I/O.
4314 if ui.verbose or opts[b'peer'] == b'raw':
4314 if ui.verbose or opts[b'peer'] == b'raw':
4315 stdin = util.makeloggingfileobject(
4315 stdin = util.makeloggingfileobject(
4316 ui, proc.stdin, b'i', logdata=True
4316 ui, proc.stdin, b'i', logdata=True
4317 )
4317 )
4318 stdout = util.makeloggingfileobject(
4318 stdout = util.makeloggingfileobject(
4319 ui, proc.stdout, b'o', logdata=True
4319 ui, proc.stdout, b'o', logdata=True
4320 )
4320 )
4321 stderr = util.makeloggingfileobject(
4321 stderr = util.makeloggingfileobject(
4322 ui, proc.stderr, b'e', logdata=True
4322 ui, proc.stderr, b'e', logdata=True
4323 )
4323 )
4324
4324
4325 # --localssh also implies the peer connection settings.
4325 # --localssh also implies the peer connection settings.
4326
4326
4327 url = b'ssh://localserver'
4327 url = b'ssh://localserver'
4328 autoreadstderr = not opts[b'noreadstderr']
4328 autoreadstderr = not opts[b'noreadstderr']
4329
4329
4330 if opts[b'peer'] == b'ssh1':
4330 if opts[b'peer'] == b'ssh1':
4331 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4331 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4332 peer = sshpeer.sshv1peer(
4332 peer = sshpeer.sshv1peer(
4333 ui,
4333 ui,
4334 url,
4334 url,
4335 proc,
4335 proc,
4336 stdin,
4336 stdin,
4337 stdout,
4337 stdout,
4338 stderr,
4338 stderr,
4339 None,
4339 None,
4340 autoreadstderr=autoreadstderr,
4340 autoreadstderr=autoreadstderr,
4341 )
4341 )
4342 elif opts[b'peer'] == b'ssh2':
4342 elif opts[b'peer'] == b'ssh2':
4343 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
4343 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
4344 peer = sshpeer.sshv2peer(
4344 peer = sshpeer.sshv2peer(
4345 ui,
4345 ui,
4346 url,
4346 url,
4347 proc,
4347 proc,
4348 stdin,
4348 stdin,
4349 stdout,
4349 stdout,
4350 stderr,
4350 stderr,
4351 None,
4351 None,
4352 autoreadstderr=autoreadstderr,
4352 autoreadstderr=autoreadstderr,
4353 )
4353 )
4354 elif opts[b'peer'] == b'raw':
4354 elif opts[b'peer'] == b'raw':
4355 ui.write(_(b'using raw connection to peer\n'))
4355 ui.write(_(b'using raw connection to peer\n'))
4356 peer = None
4356 peer = None
4357 else:
4357 else:
4358 ui.write(_(b'creating ssh peer from handshake results\n'))
4358 ui.write(_(b'creating ssh peer from handshake results\n'))
4359 peer = sshpeer.makepeer(
4359 peer = sshpeer.makepeer(
4360 ui,
4360 ui,
4361 url,
4361 url,
4362 proc,
4362 proc,
4363 stdin,
4363 stdin,
4364 stdout,
4364 stdout,
4365 stderr,
4365 stderr,
4366 autoreadstderr=autoreadstderr,
4366 autoreadstderr=autoreadstderr,
4367 )
4367 )
4368
4368
4369 elif path:
4369 elif path:
4370 # We bypass hg.peer() so we can proxy the sockets.
4370 # We bypass hg.peer() so we can proxy the sockets.
4371 # TODO consider not doing this because we skip
4371 # TODO consider not doing this because we skip
4372 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4372 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4373 u = util.url(path)
4373 u = util.url(path)
4374 if u.scheme != b'http':
4374 if u.scheme != b'http':
4375 raise error.Abort(_(b'only http:// paths are currently supported'))
4375 raise error.Abort(_(b'only http:// paths are currently supported'))
4376
4376
4377 url, authinfo = u.authinfo()
4377 url, authinfo = u.authinfo()
4378 openerargs = {
4378 openerargs = {
4379 'useragent': b'Mercurial debugwireproto',
4379 'useragent': b'Mercurial debugwireproto',
4380 }
4380 }
4381
4381
4382 # Turn pipes/sockets into observers so we can log I/O.
4382 # Turn pipes/sockets into observers so we can log I/O.
4383 if ui.verbose:
4383 if ui.verbose:
4384 openerargs.update(
4384 openerargs.update(
4385 {
4385 {
4386 'loggingfh': ui,
4386 'loggingfh': ui,
4387 'loggingname': b's',
4387 'loggingname': b's',
4388 'loggingopts': {
4388 'loggingopts': {
4389 'logdata': True,
4389 'logdata': True,
4390 'logdataapis': False,
4390 'logdataapis': False,
4391 },
4391 },
4392 }
4392 }
4393 )
4393 )
4394
4394
4395 if ui.debugflag:
4395 if ui.debugflag:
4396 openerargs['loggingopts']['logdataapis'] = True
4396 openerargs['loggingopts']['logdataapis'] = True
4397
4397
4398 # Don't send default headers when in raw mode. This allows us to
4398 # Don't send default headers when in raw mode. This allows us to
4399 # bypass most of the behavior of our URL handling code so we can
4399 # bypass most of the behavior of our URL handling code so we can
4400 # have near complete control over what's sent on the wire.
4400 # have near complete control over what's sent on the wire.
4401 if opts[b'peer'] == b'raw':
4401 if opts[b'peer'] == b'raw':
4402 openerargs['sendaccept'] = False
4402 openerargs['sendaccept'] = False
4403
4403
4404 opener = urlmod.opener(ui, authinfo, **openerargs)
4404 opener = urlmod.opener(ui, authinfo, **openerargs)
4405
4405
4406 if opts[b'peer'] == b'http2':
4406 if opts[b'peer'] == b'http2':
4407 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4407 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4408 # We go through makepeer() because we need an API descriptor for
4408 # We go through makepeer() because we need an API descriptor for
4409 # the peer instance to be useful.
4409 # the peer instance to be useful.
4410 with ui.configoverride(
4410 with ui.configoverride(
4411 {(b'experimental', b'httppeer.advertise-v2'): True}
4411 {(b'experimental', b'httppeer.advertise-v2'): True}
4412 ):
4412 ):
4413 if opts[b'nologhandshake']:
4413 if opts[b'nologhandshake']:
4414 ui.pushbuffer()
4414 ui.pushbuffer()
4415
4415
4416 peer = httppeer.makepeer(ui, path, opener=opener)
4416 peer = httppeer.makepeer(ui, path, opener=opener)
4417
4417
4418 if opts[b'nologhandshake']:
4418 if opts[b'nologhandshake']:
4419 ui.popbuffer()
4419 ui.popbuffer()
4420
4420
4421 if not isinstance(peer, httppeer.httpv2peer):
4421 if not isinstance(peer, httppeer.httpv2peer):
4422 raise error.Abort(
4422 raise error.Abort(
4423 _(
4423 _(
4424 b'could not instantiate HTTP peer for '
4424 b'could not instantiate HTTP peer for '
4425 b'wire protocol version 2'
4425 b'wire protocol version 2'
4426 ),
4426 ),
4427 hint=_(
4427 hint=_(
4428 b'the server may not have the feature '
4428 b'the server may not have the feature '
4429 b'enabled or is not allowing this '
4429 b'enabled or is not allowing this '
4430 b'client version'
4430 b'client version'
4431 ),
4431 ),
4432 )
4432 )
4433
4433
4434 elif opts[b'peer'] == b'raw':
4434 elif opts[b'peer'] == b'raw':
4435 ui.write(_(b'using raw connection to peer\n'))
4435 ui.write(_(b'using raw connection to peer\n'))
4436 peer = None
4436 peer = None
4437 elif opts[b'peer']:
4437 elif opts[b'peer']:
4438 raise error.Abort(
4438 raise error.Abort(
4439 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4439 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4440 )
4440 )
4441 else:
4441 else:
4442 peer = httppeer.makepeer(ui, path, opener=opener)
4442 peer = httppeer.makepeer(ui, path, opener=opener)
4443
4443
4444 # We /could/ populate stdin/stdout with sock.makefile()...
4444 # We /could/ populate stdin/stdout with sock.makefile()...
4445 else:
4445 else:
4446 raise error.Abort(_(b'unsupported connection configuration'))
4446 raise error.Abort(_(b'unsupported connection configuration'))
4447
4447
4448 batchedcommands = None
4448 batchedcommands = None
4449
4449
4450 # Now perform actions based on the parsed wire language instructions.
4450 # Now perform actions based on the parsed wire language instructions.
4451 for action, lines in blocks:
4451 for action, lines in blocks:
4452 if action in (b'raw', b'raw+'):
4452 if action in (b'raw', b'raw+'):
4453 if not stdin:
4453 if not stdin:
4454 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4454 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4455
4455
4456 # Concatenate the data together.
4456 # Concatenate the data together.
4457 data = b''.join(l.lstrip() for l in lines)
4457 data = b''.join(l.lstrip() for l in lines)
4458 data = stringutil.unescapestr(data)
4458 data = stringutil.unescapestr(data)
4459 stdin.write(data)
4459 stdin.write(data)
4460
4460
4461 if action == b'raw+':
4461 if action == b'raw+':
4462 stdin.flush()
4462 stdin.flush()
4463 elif action == b'flush':
4463 elif action == b'flush':
4464 if not stdin:
4464 if not stdin:
4465 raise error.Abort(_(b'cannot call flush on this peer'))
4465 raise error.Abort(_(b'cannot call flush on this peer'))
4466 stdin.flush()
4466 stdin.flush()
4467 elif action.startswith(b'command'):
4467 elif action.startswith(b'command'):
4468 if not peer:
4468 if not peer:
4469 raise error.Abort(
4469 raise error.Abort(
4470 _(
4470 _(
4471 b'cannot send commands unless peer instance '
4471 b'cannot send commands unless peer instance '
4472 b'is available'
4472 b'is available'
4473 )
4473 )
4474 )
4474 )
4475
4475
4476 command = action.split(b' ', 1)[1]
4476 command = action.split(b' ', 1)[1]
4477
4477
4478 args = {}
4478 args = {}
4479 for line in lines:
4479 for line in lines:
4480 # We need to allow empty values.
4480 # We need to allow empty values.
4481 fields = line.lstrip().split(b' ', 1)
4481 fields = line.lstrip().split(b' ', 1)
4482 if len(fields) == 1:
4482 if len(fields) == 1:
4483 key = fields[0]
4483 key = fields[0]
4484 value = b''
4484 value = b''
4485 else:
4485 else:
4486 key, value = fields
4486 key, value = fields
4487
4487
4488 if value.startswith(b'eval:'):
4488 if value.startswith(b'eval:'):
4489 value = stringutil.evalpythonliteral(value[5:])
4489 value = stringutil.evalpythonliteral(value[5:])
4490 else:
4490 else:
4491 value = stringutil.unescapestr(value)
4491 value = stringutil.unescapestr(value)
4492
4492
4493 args[key] = value
4493 args[key] = value
4494
4494
4495 if batchedcommands is not None:
4495 if batchedcommands is not None:
4496 batchedcommands.append((command, args))
4496 batchedcommands.append((command, args))
4497 continue
4497 continue
4498
4498
4499 ui.status(_(b'sending %s command\n') % command)
4499 ui.status(_(b'sending %s command\n') % command)
4500
4500
4501 if b'PUSHFILE' in args:
4501 if b'PUSHFILE' in args:
4502 with open(args[b'PUSHFILE'], 'rb') as fh:
4502 with open(args[b'PUSHFILE'], 'rb') as fh:
4503 del args[b'PUSHFILE']
4503 del args[b'PUSHFILE']
4504 res, output = peer._callpush(
4504 res, output = peer._callpush(
4505 command, fh, **pycompat.strkwargs(args)
4505 command, fh, **pycompat.strkwargs(args)
4506 )
4506 )
4507 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4507 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4508 ui.status(
4508 ui.status(
4509 _(b'remote output: %s\n') % stringutil.escapestr(output)
4509 _(b'remote output: %s\n') % stringutil.escapestr(output)
4510 )
4510 )
4511 else:
4511 else:
4512 with peer.commandexecutor() as e:
4512 with peer.commandexecutor() as e:
4513 res = e.callcommand(command, args).result()
4513 res = e.callcommand(command, args).result()
4514
4514
4515 if isinstance(res, wireprotov2peer.commandresponse):
4515 if isinstance(res, wireprotov2peer.commandresponse):
4516 val = res.objects()
4516 val = res.objects()
4517 ui.status(
4517 ui.status(
4518 _(b'response: %s\n')
4518 _(b'response: %s\n')
4519 % stringutil.pprint(val, bprefix=True, indent=2)
4519 % stringutil.pprint(val, bprefix=True, indent=2)
4520 )
4520 )
4521 else:
4521 else:
4522 ui.status(
4522 ui.status(
4523 _(b'response: %s\n')
4523 _(b'response: %s\n')
4524 % stringutil.pprint(res, bprefix=True, indent=2)
4524 % stringutil.pprint(res, bprefix=True, indent=2)
4525 )
4525 )
4526
4526
4527 elif action == b'batchbegin':
4527 elif action == b'batchbegin':
4528 if batchedcommands is not None:
4528 if batchedcommands is not None:
4529 raise error.Abort(_(b'nested batchbegin not allowed'))
4529 raise error.Abort(_(b'nested batchbegin not allowed'))
4530
4530
4531 batchedcommands = []
4531 batchedcommands = []
4532 elif action == b'batchsubmit':
4532 elif action == b'batchsubmit':
4533 # There is a batching API we could go through. But it would be
4533 # There is a batching API we could go through. But it would be
4534 # difficult to normalize requests into function calls. It is easier
4534 # difficult to normalize requests into function calls. It is easier
4535 # to bypass this layer and normalize to commands + args.
4535 # to bypass this layer and normalize to commands + args.
4536 ui.status(
4536 ui.status(
4537 _(b'sending batch with %d sub-commands\n')
4537 _(b'sending batch with %d sub-commands\n')
4538 % len(batchedcommands)
4538 % len(batchedcommands)
4539 )
4539 )
4540 assert peer is not None
4540 assert peer is not None
4541 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4541 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4542 ui.status(
4542 ui.status(
4543 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4543 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4544 )
4544 )
4545
4545
4546 batchedcommands = None
4546 batchedcommands = None
4547
4547
4548 elif action.startswith(b'httprequest '):
4548 elif action.startswith(b'httprequest '):
4549 if not opener:
4549 if not opener:
4550 raise error.Abort(
4550 raise error.Abort(
4551 _(b'cannot use httprequest without an HTTP peer')
4551 _(b'cannot use httprequest without an HTTP peer')
4552 )
4552 )
4553
4553
4554 request = action.split(b' ', 2)
4554 request = action.split(b' ', 2)
4555 if len(request) != 3:
4555 if len(request) != 3:
4556 raise error.Abort(
4556 raise error.Abort(
4557 _(
4557 _(
4558 b'invalid httprequest: expected format is '
4558 b'invalid httprequest: expected format is '
4559 b'"httprequest <method> <path>'
4559 b'"httprequest <method> <path>'
4560 )
4560 )
4561 )
4561 )
4562
4562
4563 method, httppath = request[1:]
4563 method, httppath = request[1:]
4564 headers = {}
4564 headers = {}
4565 body = None
4565 body = None
4566 frames = []
4566 frames = []
4567 for line in lines:
4567 for line in lines:
4568 line = line.lstrip()
4568 line = line.lstrip()
4569 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4569 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4570 if m:
4570 if m:
4571 # Headers need to use native strings.
4571 # Headers need to use native strings.
4572 key = pycompat.strurl(m.group(1))
4572 key = pycompat.strurl(m.group(1))
4573 value = pycompat.strurl(m.group(2))
4573 value = pycompat.strurl(m.group(2))
4574 headers[key] = value
4574 headers[key] = value
4575 continue
4575 continue
4576
4576
4577 if line.startswith(b'BODYFILE '):
4577 if line.startswith(b'BODYFILE '):
4578 with open(line.split(b' ', 1), b'rb') as fh:
4578 with open(line.split(b' ', 1), b'rb') as fh:
4579 body = fh.read()
4579 body = fh.read()
4580 elif line.startswith(b'frame '):
4580 elif line.startswith(b'frame '):
4581 frame = wireprotoframing.makeframefromhumanstring(
4581 frame = wireprotoframing.makeframefromhumanstring(
4582 line[len(b'frame ') :]
4582 line[len(b'frame ') :]
4583 )
4583 )
4584
4584
4585 frames.append(frame)
4585 frames.append(frame)
4586 else:
4586 else:
4587 raise error.Abort(
4587 raise error.Abort(
4588 _(b'unknown argument to httprequest: %s') % line
4588 _(b'unknown argument to httprequest: %s') % line
4589 )
4589 )
4590
4590
4591 url = path + httppath
4591 url = path + httppath
4592
4592
4593 if frames:
4593 if frames:
4594 body = b''.join(bytes(f) for f in frames)
4594 body = b''.join(bytes(f) for f in frames)
4595
4595
4596 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4596 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4597
4597
4598 # urllib.Request insists on using has_data() as a proxy for
4598 # urllib.Request insists on using has_data() as a proxy for
4599 # determining the request method. Override that to use our
4599 # determining the request method. Override that to use our
4600 # explicitly requested method.
4600 # explicitly requested method.
4601 req.get_method = lambda: pycompat.sysstr(method)
4601 req.get_method = lambda: pycompat.sysstr(method)
4602
4602
4603 try:
4603 try:
4604 res = opener.open(req)
4604 res = opener.open(req)
4605 body = res.read()
4605 body = res.read()
4606 except util.urlerr.urlerror as e:
4606 except util.urlerr.urlerror as e:
4607 # read() method must be called, but only exists in Python 2
4607 # read() method must be called, but only exists in Python 2
4608 getattr(e, 'read', lambda: None)()
4608 getattr(e, 'read', lambda: None)()
4609 continue
4609 continue
4610
4610
4611 ct = res.headers.get('Content-Type')
4611 ct = res.headers.get('Content-Type')
4612 if ct == 'application/mercurial-cbor':
4612 if ct == 'application/mercurial-cbor':
4613 ui.write(
4613 ui.write(
4614 _(b'cbor> %s\n')
4614 _(b'cbor> %s\n')
4615 % stringutil.pprint(
4615 % stringutil.pprint(
4616 cborutil.decodeall(body), bprefix=True, indent=2
4616 cborutil.decodeall(body), bprefix=True, indent=2
4617 )
4617 )
4618 )
4618 )
4619
4619
4620 elif action == b'close':
4620 elif action == b'close':
4621 assert peer is not None
4621 assert peer is not None
4622 peer.close()
4622 peer.close()
4623 elif action == b'readavailable':
4623 elif action == b'readavailable':
4624 if not stdout or not stderr:
4624 if not stdout or not stderr:
4625 raise error.Abort(
4625 raise error.Abort(
4626 _(b'readavailable not available on this peer')
4626 _(b'readavailable not available on this peer')
4627 )
4627 )
4628
4628
4629 stdin.close()
4629 stdin.close()
4630 stdout.read()
4630 stdout.read()
4631 stderr.read()
4631 stderr.read()
4632
4632
4633 elif action == b'readline':
4633 elif action == b'readline':
4634 if not stdout:
4634 if not stdout:
4635 raise error.Abort(_(b'readline not available on this peer'))
4635 raise error.Abort(_(b'readline not available on this peer'))
4636 stdout.readline()
4636 stdout.readline()
4637 elif action == b'ereadline':
4637 elif action == b'ereadline':
4638 if not stderr:
4638 if not stderr:
4639 raise error.Abort(_(b'ereadline not available on this peer'))
4639 raise error.Abort(_(b'ereadline not available on this peer'))
4640 stderr.readline()
4640 stderr.readline()
4641 elif action.startswith(b'read '):
4641 elif action.startswith(b'read '):
4642 count = int(action.split(b' ', 1)[1])
4642 count = int(action.split(b' ', 1)[1])
4643 if not stdout:
4643 if not stdout:
4644 raise error.Abort(_(b'read not available on this peer'))
4644 raise error.Abort(_(b'read not available on this peer'))
4645 stdout.read(count)
4645 stdout.read(count)
4646 elif action.startswith(b'eread '):
4646 elif action.startswith(b'eread '):
4647 count = int(action.split(b' ', 1)[1])
4647 count = int(action.split(b' ', 1)[1])
4648 if not stderr:
4648 if not stderr:
4649 raise error.Abort(_(b'eread not available on this peer'))
4649 raise error.Abort(_(b'eread not available on this peer'))
4650 stderr.read(count)
4650 stderr.read(count)
4651 else:
4651 else:
4652 raise error.Abort(_(b'unknown action: %s') % action)
4652 raise error.Abort(_(b'unknown action: %s') % action)
4653
4653
4654 if batchedcommands is not None:
4654 if batchedcommands is not None:
4655 raise error.Abort(_(b'unclosed "batchbegin" request'))
4655 raise error.Abort(_(b'unclosed "batchbegin" request'))
4656
4656
4657 if peer:
4657 if peer:
4658 peer.close()
4658 peer.close()
4659
4659
4660 if proc:
4660 if proc:
4661 proc.kill()
4661 proc.kill()
@@ -1,265 +1,251 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 hg,
13 hg,
14 localrepo,
14 localrepo,
15 pycompat,
15 pycompat,
16 )
16 )
17
17
18 from .upgrade_utils import (
18 from .upgrade_utils import (
19 actions as upgrade_actions,
19 actions as upgrade_actions,
20 engine as upgrade_engine,
20 engine as upgrade_engine,
21 )
21 )
22
22
23 allformatvariant = upgrade_actions.allformatvariant
23 allformatvariant = upgrade_actions.allformatvariant
24
24
25 # search without '-' to support older form on newer client.
26 #
27 # We don't enforce backward compatibility for debug command so this
28 # might eventually be dropped. However, having to use two different
29 # forms in script when comparing result is anoying enough to add
30 # backward compatibility for a while.
31 legacy_opts_map = {
32 b'redeltaparent': b're-delta-parent',
33 b'redeltamultibase': b're-delta-multibase',
34 b'redeltaall': b're-delta-all',
35 b'redeltafulladd': b're-delta-fulladd',
36 }
37
38
25
39 def upgraderepo(
26 def upgraderepo(
40 ui,
27 ui,
41 repo,
28 repo,
42 run=False,
29 run=False,
43 optimize=None,
30 optimize=None,
44 backup=True,
31 backup=True,
45 manifest=None,
32 manifest=None,
46 changelog=None,
33 changelog=None,
47 filelogs=None,
34 filelogs=None,
48 ):
35 ):
49 """Upgrade a repository in place."""
36 """Upgrade a repository in place."""
50 if optimize is None:
37 if optimize is None:
51 optimize = []
38 optimize = {}
52 optimize = {legacy_opts_map.get(o, o) for o in optimize}
53 repo = repo.unfiltered()
39 repo = repo.unfiltered()
54
40
55 revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
41 revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
56 specentries = (
42 specentries = (
57 (upgrade_engine.UPGRADE_CHANGELOG, changelog),
43 (upgrade_engine.UPGRADE_CHANGELOG, changelog),
58 (upgrade_engine.UPGRADE_MANIFEST, manifest),
44 (upgrade_engine.UPGRADE_MANIFEST, manifest),
59 (upgrade_engine.UPGRADE_FILELOGS, filelogs),
45 (upgrade_engine.UPGRADE_FILELOGS, filelogs),
60 )
46 )
61 specified = [(y, x) for (y, x) in specentries if x is not None]
47 specified = [(y, x) for (y, x) in specentries if x is not None]
62 if specified:
48 if specified:
63 # we have some limitation on revlogs to be recloned
49 # we have some limitation on revlogs to be recloned
64 if any(x for y, x in specified):
50 if any(x for y, x in specified):
65 revlogs = set()
51 revlogs = set()
66 for upgrade, enabled in specified:
52 for upgrade, enabled in specified:
67 if enabled:
53 if enabled:
68 revlogs.add(upgrade)
54 revlogs.add(upgrade)
69 else:
55 else:
70 # none are enabled
56 # none are enabled
71 for upgrade, __ in specified:
57 for upgrade, __ in specified:
72 revlogs.discard(upgrade)
58 revlogs.discard(upgrade)
73
59
74 # Ensure the repository can be upgraded.
60 # Ensure the repository can be upgraded.
75 upgrade_actions.check_source_requirements(repo)
61 upgrade_actions.check_source_requirements(repo)
76
62
77 default_options = localrepo.defaultcreateopts(repo.ui)
63 default_options = localrepo.defaultcreateopts(repo.ui)
78 newreqs = localrepo.newreporequirements(repo.ui, default_options)
64 newreqs = localrepo.newreporequirements(repo.ui, default_options)
79 newreqs.update(upgrade_actions.preservedrequirements(repo))
65 newreqs.update(upgrade_actions.preservedrequirements(repo))
80
66
81 upgrade_actions.check_requirements_changes(repo, newreqs)
67 upgrade_actions.check_requirements_changes(repo, newreqs)
82
68
83 # Find and validate all improvements that can be made.
69 # Find and validate all improvements that can be made.
84 alloptimizations = upgrade_actions.findoptimizations(repo)
70 alloptimizations = upgrade_actions.findoptimizations(repo)
85
71
86 # Apply and Validate arguments.
72 # Apply and Validate arguments.
87 optimizations = []
73 optimizations = []
88 for o in alloptimizations:
74 for o in alloptimizations:
89 if o.name in optimize:
75 if o.name in optimize:
90 optimizations.append(o)
76 optimizations.append(o)
91 optimize.discard(o.name)
77 optimize.discard(o.name)
92
78
93 if optimize: # anything left is unknown
79 if optimize: # anything left is unknown
94 raise error.Abort(
80 raise error.Abort(
95 _(b'unknown optimization action requested: %s')
81 _(b'unknown optimization action requested: %s')
96 % b', '.join(sorted(optimize)),
82 % b', '.join(sorted(optimize)),
97 hint=_(b'run without arguments to see valid optimizations'),
83 hint=_(b'run without arguments to see valid optimizations'),
98 )
84 )
99
85
100 format_upgrades = upgrade_actions.find_format_upgrades(repo)
86 format_upgrades = upgrade_actions.find_format_upgrades(repo)
101 actions = upgrade_actions.determineactions(
87 actions = upgrade_actions.determineactions(
102 repo, format_upgrades, repo.requirements, newreqs
88 repo, format_upgrades, repo.requirements, newreqs
103 )
89 )
104 actions.extend(
90 actions.extend(
105 o
91 o
106 for o in sorted(optimizations)
92 for o in sorted(optimizations)
107 # determineactions could have added optimisation
93 # determineactions could have added optimisation
108 if o not in actions
94 if o not in actions
109 )
95 )
110
96
111 removedreqs = repo.requirements - newreqs
97 removedreqs = repo.requirements - newreqs
112 addedreqs = newreqs - repo.requirements
98 addedreqs = newreqs - repo.requirements
113
99
114 if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
100 if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
115 incompatible = upgrade_actions.RECLONES_REQUIREMENTS & (
101 incompatible = upgrade_actions.RECLONES_REQUIREMENTS & (
116 removedreqs | addedreqs
102 removedreqs | addedreqs
117 )
103 )
118 if incompatible:
104 if incompatible:
119 msg = _(
105 msg = _(
120 b'ignoring revlogs selection flags, format requirements '
106 b'ignoring revlogs selection flags, format requirements '
121 b'change: %s\n'
107 b'change: %s\n'
122 )
108 )
123 ui.warn(msg % b', '.join(sorted(incompatible)))
109 ui.warn(msg % b', '.join(sorted(incompatible)))
124 revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
110 revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
125
111
126 upgrade_op = upgrade_actions.UpgradeOperation(
112 upgrade_op = upgrade_actions.UpgradeOperation(
127 ui,
113 ui,
128 newreqs,
114 newreqs,
129 repo.requirements,
115 repo.requirements,
130 actions,
116 actions,
131 revlogs,
117 revlogs,
132 )
118 )
133
119
134 if not run:
120 if not run:
135 fromconfig = []
121 fromconfig = []
136 onlydefault = []
122 onlydefault = []
137
123
138 for d in format_upgrades:
124 for d in format_upgrades:
139 if d.fromconfig(repo):
125 if d.fromconfig(repo):
140 fromconfig.append(d)
126 fromconfig.append(d)
141 elif d.default:
127 elif d.default:
142 onlydefault.append(d)
128 onlydefault.append(d)
143
129
144 if fromconfig or onlydefault:
130 if fromconfig or onlydefault:
145
131
146 if fromconfig:
132 if fromconfig:
147 ui.status(
133 ui.status(
148 _(
134 _(
149 b'repository lacks features recommended by '
135 b'repository lacks features recommended by '
150 b'current config options:\n\n'
136 b'current config options:\n\n'
151 )
137 )
152 )
138 )
153 for i in fromconfig:
139 for i in fromconfig:
154 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
140 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
155
141
156 if onlydefault:
142 if onlydefault:
157 ui.status(
143 ui.status(
158 _(
144 _(
159 b'repository lacks features used by the default '
145 b'repository lacks features used by the default '
160 b'config options:\n\n'
146 b'config options:\n\n'
161 )
147 )
162 )
148 )
163 for i in onlydefault:
149 for i in onlydefault:
164 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
150 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
165
151
166 ui.status(b'\n')
152 ui.status(b'\n')
167 else:
153 else:
168 ui.status(_(b'(no format upgrades found in existing repository)\n'))
154 ui.status(_(b'(no format upgrades found in existing repository)\n'))
169
155
170 ui.status(
156 ui.status(
171 _(
157 _(
172 b'performing an upgrade with "--run" will make the following '
158 b'performing an upgrade with "--run" will make the following '
173 b'changes:\n\n'
159 b'changes:\n\n'
174 )
160 )
175 )
161 )
176
162
177 upgrade_op.print_requirements()
163 upgrade_op.print_requirements()
178 upgrade_op.print_optimisations()
164 upgrade_op.print_optimisations()
179 upgrade_op.print_upgrade_actions()
165 upgrade_op.print_upgrade_actions()
180 upgrade_op.print_affected_revlogs()
166 upgrade_op.print_affected_revlogs()
181
167
182 if upgrade_op.unused_optimizations:
168 if upgrade_op.unused_optimizations:
183 ui.status(
169 ui.status(
184 _(
170 _(
185 b'additional optimizations are available by specifying '
171 b'additional optimizations are available by specifying '
186 b'"--optimize <name>":\n\n'
172 b'"--optimize <name>":\n\n'
187 )
173 )
188 )
174 )
189 upgrade_op.print_unused_optimizations()
175 upgrade_op.print_unused_optimizations()
190 return
176 return
191
177
192 # Else we're in the run=true case.
178 # Else we're in the run=true case.
193 ui.write(_(b'upgrade will perform the following actions:\n\n'))
179 ui.write(_(b'upgrade will perform the following actions:\n\n'))
194 upgrade_op.print_requirements()
180 upgrade_op.print_requirements()
195 upgrade_op.print_optimisations()
181 upgrade_op.print_optimisations()
196 upgrade_op.print_upgrade_actions()
182 upgrade_op.print_upgrade_actions()
197 upgrade_op.print_affected_revlogs()
183 upgrade_op.print_affected_revlogs()
198
184
199 ui.status(_(b'beginning upgrade...\n'))
185 ui.status(_(b'beginning upgrade...\n'))
200 with repo.wlock(), repo.lock():
186 with repo.wlock(), repo.lock():
201 ui.status(_(b'repository locked and read-only\n'))
187 ui.status(_(b'repository locked and read-only\n'))
202 # Our strategy for upgrading the repository is to create a new,
188 # Our strategy for upgrading the repository is to create a new,
203 # temporary repository, write data to it, then do a swap of the
189 # temporary repository, write data to it, then do a swap of the
204 # data. There are less heavyweight ways to do this, but it is easier
190 # data. There are less heavyweight ways to do this, but it is easier
205 # to create a new repo object than to instantiate all the components
191 # to create a new repo object than to instantiate all the components
206 # (like the store) separately.
192 # (like the store) separately.
207 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
193 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
208 backuppath = None
194 backuppath = None
209 try:
195 try:
210 ui.status(
196 ui.status(
211 _(
197 _(
212 b'creating temporary repository to stage migrated '
198 b'creating temporary repository to stage migrated '
213 b'data: %s\n'
199 b'data: %s\n'
214 )
200 )
215 % tmppath
201 % tmppath
216 )
202 )
217
203
218 # clone ui without using ui.copy because repo.ui is protected
204 # clone ui without using ui.copy because repo.ui is protected
219 repoui = repo.ui.__class__(repo.ui)
205 repoui = repo.ui.__class__(repo.ui)
220 dstrepo = hg.repository(repoui, path=tmppath, create=True)
206 dstrepo = hg.repository(repoui, path=tmppath, create=True)
221
207
222 with dstrepo.wlock(), dstrepo.lock():
208 with dstrepo.wlock(), dstrepo.lock():
223 backuppath = upgrade_engine.upgrade(
209 backuppath = upgrade_engine.upgrade(
224 ui, repo, dstrepo, upgrade_op
210 ui, repo, dstrepo, upgrade_op
225 )
211 )
226 if not (backup or backuppath is None):
212 if not (backup or backuppath is None):
227 ui.status(
213 ui.status(
228 _(b'removing old repository content %s\n') % backuppath
214 _(b'removing old repository content %s\n') % backuppath
229 )
215 )
230 repo.vfs.rmtree(backuppath, forcibly=True)
216 repo.vfs.rmtree(backuppath, forcibly=True)
231 backuppath = None
217 backuppath = None
232
218
233 finally:
219 finally:
234 ui.status(_(b'removing temporary repository %s\n') % tmppath)
220 ui.status(_(b'removing temporary repository %s\n') % tmppath)
235 repo.vfs.rmtree(tmppath, forcibly=True)
221 repo.vfs.rmtree(tmppath, forcibly=True)
236
222
237 if backuppath and not ui.quiet:
223 if backuppath and not ui.quiet:
238 ui.warn(
224 ui.warn(
239 _(b'copy of old repository backed up at %s\n') % backuppath
225 _(b'copy of old repository backed up at %s\n') % backuppath
240 )
226 )
241 ui.warn(
227 ui.warn(
242 _(
228 _(
243 b'the old repository will not be deleted; remove '
229 b'the old repository will not be deleted; remove '
244 b'it to free up disk space once the upgraded '
230 b'it to free up disk space once the upgraded '
245 b'repository is verified\n'
231 b'repository is verified\n'
246 )
232 )
247 )
233 )
248
234
249 if upgrade_actions.sharesafe.name in addedreqs:
235 if upgrade_actions.sharesafe.name in addedreqs:
250 ui.warn(
236 ui.warn(
251 _(
237 _(
252 b'repository upgraded to share safe mode, existing'
238 b'repository upgraded to share safe mode, existing'
253 b' shares will still work in old non-safe mode. '
239 b' shares will still work in old non-safe mode. '
254 b'Re-share existing shares to use them in safe mode'
240 b'Re-share existing shares to use them in safe mode'
255 b' New shares will be created in safe mode.\n'
241 b' New shares will be created in safe mode.\n'
256 )
242 )
257 )
243 )
258 if upgrade_actions.sharesafe.name in removedreqs:
244 if upgrade_actions.sharesafe.name in removedreqs:
259 ui.warn(
245 ui.warn(
260 _(
246 _(
261 b'repository downgraded to not use share safe mode, '
247 b'repository downgraded to not use share safe mode, '
262 b'existing shares will not work and needs to'
248 b'existing shares will not work and needs to'
263 b' be reshared.\n'
249 b' be reshared.\n'
264 )
250 )
265 )
251 )
@@ -1,1646 +1,1646 b''
1 #require no-reposimplestore
1 #require no-reposimplestore
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > share =
5 > share =
6 > EOF
6 > EOF
7
7
8 store and revlogv1 are required in source
8 store and revlogv1 are required in source
9
9
10 $ hg --config format.usestore=false init no-store
10 $ hg --config format.usestore=false init no-store
11 $ hg -R no-store debugupgraderepo
11 $ hg -R no-store debugupgraderepo
12 abort: cannot upgrade repository; requirement missing: store
12 abort: cannot upgrade repository; requirement missing: store
13 [255]
13 [255]
14
14
15 $ hg init no-revlogv1
15 $ hg init no-revlogv1
16 $ cat > no-revlogv1/.hg/requires << EOF
16 $ cat > no-revlogv1/.hg/requires << EOF
17 > dotencode
17 > dotencode
18 > fncache
18 > fncache
19 > generaldelta
19 > generaldelta
20 > store
20 > store
21 > EOF
21 > EOF
22
22
23 $ hg -R no-revlogv1 debugupgraderepo
23 $ hg -R no-revlogv1 debugupgraderepo
24 abort: cannot upgrade repository; requirement missing: revlogv1
24 abort: cannot upgrade repository; requirement missing: revlogv1
25 [255]
25 [255]
26
26
27 Cannot upgrade shared repositories
27 Cannot upgrade shared repositories
28
28
29 $ hg init share-parent
29 $ hg init share-parent
30 $ hg -q share share-parent share-child
30 $ hg -q share share-parent share-child
31
31
32 $ hg -R share-child debugupgraderepo
32 $ hg -R share-child debugupgraderepo
33 abort: cannot upgrade repository; unsupported source requirement: shared
33 abort: cannot upgrade repository; unsupported source requirement: shared
34 [255]
34 [255]
35
35
36 Do not yet support upgrading treemanifest repos
36 Do not yet support upgrading treemanifest repos
37
37
38 $ hg --config experimental.treemanifest=true init treemanifest
38 $ hg --config experimental.treemanifest=true init treemanifest
39 $ hg -R treemanifest debugupgraderepo
39 $ hg -R treemanifest debugupgraderepo
40 abort: cannot upgrade repository; unsupported source requirement: treemanifest
40 abort: cannot upgrade repository; unsupported source requirement: treemanifest
41 [255]
41 [255]
42
42
43 Cannot add treemanifest requirement during upgrade
43 Cannot add treemanifest requirement during upgrade
44
44
45 $ hg init disallowaddedreq
45 $ hg init disallowaddedreq
46 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
46 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
47 abort: cannot upgrade repository; do not support adding requirement: treemanifest
47 abort: cannot upgrade repository; do not support adding requirement: treemanifest
48 [255]
48 [255]
49
49
50 An upgrade of a repository created with recommended settings only suggests optimizations
50 An upgrade of a repository created with recommended settings only suggests optimizations
51
51
52 $ hg init empty
52 $ hg init empty
53 $ cd empty
53 $ cd empty
54 $ hg debugformat
54 $ hg debugformat
55 format-variant repo
55 format-variant repo
56 fncache: yes
56 fncache: yes
57 dotencode: yes
57 dotencode: yes
58 generaldelta: yes
58 generaldelta: yes
59 exp-sharesafe: no
59 exp-sharesafe: no
60 sparserevlog: yes
60 sparserevlog: yes
61 sidedata: no
61 sidedata: no
62 persistent-nodemap: no
62 persistent-nodemap: no
63 copies-sdc: no
63 copies-sdc: no
64 plain-cl-delta: yes
64 plain-cl-delta: yes
65 compression: zlib
65 compression: zlib
66 compression-level: default
66 compression-level: default
67 $ hg debugformat --verbose
67 $ hg debugformat --verbose
68 format-variant repo config default
68 format-variant repo config default
69 fncache: yes yes yes
69 fncache: yes yes yes
70 dotencode: yes yes yes
70 dotencode: yes yes yes
71 generaldelta: yes yes yes
71 generaldelta: yes yes yes
72 exp-sharesafe: no no no
72 exp-sharesafe: no no no
73 sparserevlog: yes yes yes
73 sparserevlog: yes yes yes
74 sidedata: no no no
74 sidedata: no no no
75 persistent-nodemap: no no no
75 persistent-nodemap: no no no
76 copies-sdc: no no no
76 copies-sdc: no no no
77 plain-cl-delta: yes yes yes
77 plain-cl-delta: yes yes yes
78 compression: zlib zlib zlib
78 compression: zlib zlib zlib
79 compression-level: default default default
79 compression-level: default default default
80 $ hg debugformat --verbose --config format.usefncache=no
80 $ hg debugformat --verbose --config format.usefncache=no
81 format-variant repo config default
81 format-variant repo config default
82 fncache: yes no yes
82 fncache: yes no yes
83 dotencode: yes no yes
83 dotencode: yes no yes
84 generaldelta: yes yes yes
84 generaldelta: yes yes yes
85 exp-sharesafe: no no no
85 exp-sharesafe: no no no
86 sparserevlog: yes yes yes
86 sparserevlog: yes yes yes
87 sidedata: no no no
87 sidedata: no no no
88 persistent-nodemap: no no no
88 persistent-nodemap: no no no
89 copies-sdc: no no no
89 copies-sdc: no no no
90 plain-cl-delta: yes yes yes
90 plain-cl-delta: yes yes yes
91 compression: zlib zlib zlib
91 compression: zlib zlib zlib
92 compression-level: default default default
92 compression-level: default default default
93 $ hg debugformat --verbose --config format.usefncache=no --color=debug
93 $ hg debugformat --verbose --config format.usefncache=no --color=debug
94 format-variant repo config default
94 format-variant repo config default
95 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
95 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
96 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
96 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
97 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
97 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
98 [formatvariant.name.uptodate|exp-sharesafe: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
98 [formatvariant.name.uptodate|exp-sharesafe: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
99 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
99 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
100 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
100 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
101 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
101 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
102 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
102 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
103 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
103 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
104 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
104 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
105 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
105 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
106 $ hg debugformat -Tjson
106 $ hg debugformat -Tjson
107 [
107 [
108 {
108 {
109 "config": true,
109 "config": true,
110 "default": true,
110 "default": true,
111 "name": "fncache",
111 "name": "fncache",
112 "repo": true
112 "repo": true
113 },
113 },
114 {
114 {
115 "config": true,
115 "config": true,
116 "default": true,
116 "default": true,
117 "name": "dotencode",
117 "name": "dotencode",
118 "repo": true
118 "repo": true
119 },
119 },
120 {
120 {
121 "config": true,
121 "config": true,
122 "default": true,
122 "default": true,
123 "name": "generaldelta",
123 "name": "generaldelta",
124 "repo": true
124 "repo": true
125 },
125 },
126 {
126 {
127 "config": false,
127 "config": false,
128 "default": false,
128 "default": false,
129 "name": "exp-sharesafe",
129 "name": "exp-sharesafe",
130 "repo": false
130 "repo": false
131 },
131 },
132 {
132 {
133 "config": true,
133 "config": true,
134 "default": true,
134 "default": true,
135 "name": "sparserevlog",
135 "name": "sparserevlog",
136 "repo": true
136 "repo": true
137 },
137 },
138 {
138 {
139 "config": false,
139 "config": false,
140 "default": false,
140 "default": false,
141 "name": "sidedata",
141 "name": "sidedata",
142 "repo": false
142 "repo": false
143 },
143 },
144 {
144 {
145 "config": false,
145 "config": false,
146 "default": false,
146 "default": false,
147 "name": "persistent-nodemap",
147 "name": "persistent-nodemap",
148 "repo": false
148 "repo": false
149 },
149 },
150 {
150 {
151 "config": false,
151 "config": false,
152 "default": false,
152 "default": false,
153 "name": "copies-sdc",
153 "name": "copies-sdc",
154 "repo": false
154 "repo": false
155 },
155 },
156 {
156 {
157 "config": true,
157 "config": true,
158 "default": true,
158 "default": true,
159 "name": "plain-cl-delta",
159 "name": "plain-cl-delta",
160 "repo": true
160 "repo": true
161 },
161 },
162 {
162 {
163 "config": "zlib",
163 "config": "zlib",
164 "default": "zlib",
164 "default": "zlib",
165 "name": "compression",
165 "name": "compression",
166 "repo": "zlib"
166 "repo": "zlib"
167 },
167 },
168 {
168 {
169 "config": "default",
169 "config": "default",
170 "default": "default",
170 "default": "default",
171 "name": "compression-level",
171 "name": "compression-level",
172 "repo": "default"
172 "repo": "default"
173 }
173 }
174 ]
174 ]
175 $ hg debugupgraderepo
175 $ hg debugupgraderepo
176 (no format upgrades found in existing repository)
176 (no format upgrades found in existing repository)
177 performing an upgrade with "--run" will make the following changes:
177 performing an upgrade with "--run" will make the following changes:
178
178
179 requirements
179 requirements
180 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
180 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
181
181
182 processed revlogs:
182 processed revlogs:
183 - all-filelogs
183 - all-filelogs
184 - changelog
184 - changelog
185 - manifest
185 - manifest
186
186
187 additional optimizations are available by specifying "--optimize <name>":
187 additional optimizations are available by specifying "--optimize <name>":
188
188
189 re-delta-parent
189 re-delta-parent
190 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
190 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
191
191
192 re-delta-multibase
192 re-delta-multibase
193 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
193 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
194
194
195 re-delta-all
195 re-delta-all
196 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
196 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
197
197
198 re-delta-fulladd
198 re-delta-fulladd
199 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
199 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
200
200
201
201
202 $ hg debugupgraderepo --quiet
202 $ hg debugupgraderepo --quiet
203 requirements
203 requirements
204 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
204 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
205
205
206 processed revlogs:
206 processed revlogs:
207 - all-filelogs
207 - all-filelogs
208 - changelog
208 - changelog
209 - manifest
209 - manifest
210
210
211
211
212 --optimize can be used to add optimizations
212 --optimize can be used to add optimizations
213
213
214 $ hg debugupgrade --optimize redeltaparent
214 $ hg debugupgrade --optimize 're-delta-parent'
215 (no format upgrades found in existing repository)
215 (no format upgrades found in existing repository)
216 performing an upgrade with "--run" will make the following changes:
216 performing an upgrade with "--run" will make the following changes:
217
217
218 requirements
218 requirements
219 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
219 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
220
220
221 optimisations: re-delta-parent
221 optimisations: re-delta-parent
222
222
223 re-delta-parent
223 re-delta-parent
224 deltas within internal storage will choose a new base revision if needed
224 deltas within internal storage will choose a new base revision if needed
225
225
226 processed revlogs:
226 processed revlogs:
227 - all-filelogs
227 - all-filelogs
228 - changelog
228 - changelog
229 - manifest
229 - manifest
230
230
231 additional optimizations are available by specifying "--optimize <name>":
231 additional optimizations are available by specifying "--optimize <name>":
232
232
233 re-delta-multibase
233 re-delta-multibase
234 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
234 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
235
235
236 re-delta-all
236 re-delta-all
237 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
237 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
238
238
239 re-delta-fulladd
239 re-delta-fulladd
240 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
240 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
241
241
242
242
243 modern form of the option
243 modern form of the option
244
244
245 $ hg debugupgrade --optimize re-delta-parent
245 $ hg debugupgrade --optimize re-delta-parent
246 (no format upgrades found in existing repository)
246 (no format upgrades found in existing repository)
247 performing an upgrade with "--run" will make the following changes:
247 performing an upgrade with "--run" will make the following changes:
248
248
249 requirements
249 requirements
250 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
250 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
251
251
252 optimisations: re-delta-parent
252 optimisations: re-delta-parent
253
253
254 re-delta-parent
254 re-delta-parent
255 deltas within internal storage will choose a new base revision if needed
255 deltas within internal storage will choose a new base revision if needed
256
256
257 processed revlogs:
257 processed revlogs:
258 - all-filelogs
258 - all-filelogs
259 - changelog
259 - changelog
260 - manifest
260 - manifest
261
261
262 additional optimizations are available by specifying "--optimize <name>":
262 additional optimizations are available by specifying "--optimize <name>":
263
263
264 re-delta-multibase
264 re-delta-multibase
265 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
265 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
266
266
267 re-delta-all
267 re-delta-all
268 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
268 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
269
269
270 re-delta-fulladd
270 re-delta-fulladd
271 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
271 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
272
272
273 $ hg debugupgrade --optimize re-delta-parent --quiet
273 $ hg debugupgrade --optimize re-delta-parent --quiet
274 requirements
274 requirements
275 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
275 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
276
276
277 optimisations: re-delta-parent
277 optimisations: re-delta-parent
278
278
279 processed revlogs:
279 processed revlogs:
280 - all-filelogs
280 - all-filelogs
281 - changelog
281 - changelog
282 - manifest
282 - manifest
283
283
284
284
285 unknown optimization:
285 unknown optimization:
286
286
287 $ hg debugupgrade --optimize foobar
287 $ hg debugupgrade --optimize foobar
288 abort: unknown optimization action requested: foobar
288 abort: unknown optimization action requested: foobar
289 (run without arguments to see valid optimizations)
289 (run without arguments to see valid optimizations)
290 [255]
290 [255]
291
291
292 Various sub-optimal detections work
292 Various sub-optimal detections work
293
293
294 $ cat > .hg/requires << EOF
294 $ cat > .hg/requires << EOF
295 > revlogv1
295 > revlogv1
296 > store
296 > store
297 > EOF
297 > EOF
298
298
299 $ hg debugformat
299 $ hg debugformat
300 format-variant repo
300 format-variant repo
301 fncache: no
301 fncache: no
302 dotencode: no
302 dotencode: no
303 generaldelta: no
303 generaldelta: no
304 exp-sharesafe: no
304 exp-sharesafe: no
305 sparserevlog: no
305 sparserevlog: no
306 sidedata: no
306 sidedata: no
307 persistent-nodemap: no
307 persistent-nodemap: no
308 copies-sdc: no
308 copies-sdc: no
309 plain-cl-delta: yes
309 plain-cl-delta: yes
310 compression: zlib
310 compression: zlib
311 compression-level: default
311 compression-level: default
312 $ hg debugformat --verbose
312 $ hg debugformat --verbose
313 format-variant repo config default
313 format-variant repo config default
314 fncache: no yes yes
314 fncache: no yes yes
315 dotencode: no yes yes
315 dotencode: no yes yes
316 generaldelta: no yes yes
316 generaldelta: no yes yes
317 exp-sharesafe: no no no
317 exp-sharesafe: no no no
318 sparserevlog: no yes yes
318 sparserevlog: no yes yes
319 sidedata: no no no
319 sidedata: no no no
320 persistent-nodemap: no no no
320 persistent-nodemap: no no no
321 copies-sdc: no no no
321 copies-sdc: no no no
322 plain-cl-delta: yes yes yes
322 plain-cl-delta: yes yes yes
323 compression: zlib zlib zlib
323 compression: zlib zlib zlib
324 compression-level: default default default
324 compression-level: default default default
325 $ hg debugformat --verbose --config format.usegeneraldelta=no
325 $ hg debugformat --verbose --config format.usegeneraldelta=no
326 format-variant repo config default
326 format-variant repo config default
327 fncache: no yes yes
327 fncache: no yes yes
328 dotencode: no yes yes
328 dotencode: no yes yes
329 generaldelta: no no yes
329 generaldelta: no no yes
330 exp-sharesafe: no no no
330 exp-sharesafe: no no no
331 sparserevlog: no no yes
331 sparserevlog: no no yes
332 sidedata: no no no
332 sidedata: no no no
333 persistent-nodemap: no no no
333 persistent-nodemap: no no no
334 copies-sdc: no no no
334 copies-sdc: no no no
335 plain-cl-delta: yes yes yes
335 plain-cl-delta: yes yes yes
336 compression: zlib zlib zlib
336 compression: zlib zlib zlib
337 compression-level: default default default
337 compression-level: default default default
338 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
338 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
339 format-variant repo config default
339 format-variant repo config default
340 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
340 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
341 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
341 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
342 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
342 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
343 [formatvariant.name.uptodate|exp-sharesafe: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
343 [formatvariant.name.uptodate|exp-sharesafe: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
344 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
344 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
345 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
345 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
346 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
346 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
347 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
347 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
348 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
348 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
349 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
349 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
350 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
350 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
351 $ hg debugupgraderepo
351 $ hg debugupgraderepo
352 repository lacks features recommended by current config options:
352 repository lacks features recommended by current config options:
353
353
354 fncache
354 fncache
355 long and reserved filenames may not work correctly; repository performance is sub-optimal
355 long and reserved filenames may not work correctly; repository performance is sub-optimal
356
356
357 dotencode
357 dotencode
358 storage of filenames beginning with a period or space may not work correctly
358 storage of filenames beginning with a period or space may not work correctly
359
359
360 generaldelta
360 generaldelta
361 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
361 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
362
362
363 sparserevlog
363 sparserevlog
364 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
364 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
365
365
366
366
367 performing an upgrade with "--run" will make the following changes:
367 performing an upgrade with "--run" will make the following changes:
368
368
369 requirements
369 requirements
370 preserved: revlogv1, store
370 preserved: revlogv1, store
371 added: dotencode, fncache, generaldelta, sparserevlog
371 added: dotencode, fncache, generaldelta, sparserevlog
372
372
373 fncache
373 fncache
374 repository will be more resilient to storing certain paths and performance of certain operations should be improved
374 repository will be more resilient to storing certain paths and performance of certain operations should be improved
375
375
376 dotencode
376 dotencode
377 repository will be better able to store files beginning with a space or period
377 repository will be better able to store files beginning with a space or period
378
378
379 generaldelta
379 generaldelta
380 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
380 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
381
381
382 sparserevlog
382 sparserevlog
383 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
383 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
384
384
385 processed revlogs:
385 processed revlogs:
386 - all-filelogs
386 - all-filelogs
387 - changelog
387 - changelog
388 - manifest
388 - manifest
389
389
390 additional optimizations are available by specifying "--optimize <name>":
390 additional optimizations are available by specifying "--optimize <name>":
391
391
392 re-delta-parent
392 re-delta-parent
393 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
393 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
394
394
395 re-delta-multibase
395 re-delta-multibase
396 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
396 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
397
397
398 re-delta-all
398 re-delta-all
399 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
399 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
400
400
401 re-delta-fulladd
401 re-delta-fulladd
402 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
402 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
403
403
404 $ hg debugupgraderepo --quiet
404 $ hg debugupgraderepo --quiet
405 requirements
405 requirements
406 preserved: revlogv1, store
406 preserved: revlogv1, store
407 added: dotencode, fncache, generaldelta, sparserevlog
407 added: dotencode, fncache, generaldelta, sparserevlog
408
408
409 processed revlogs:
409 processed revlogs:
410 - all-filelogs
410 - all-filelogs
411 - changelog
411 - changelog
412 - manifest
412 - manifest
413
413
414
414
415 $ hg --config format.dotencode=false debugupgraderepo
415 $ hg --config format.dotencode=false debugupgraderepo
416 repository lacks features recommended by current config options:
416 repository lacks features recommended by current config options:
417
417
418 fncache
418 fncache
419 long and reserved filenames may not work correctly; repository performance is sub-optimal
419 long and reserved filenames may not work correctly; repository performance is sub-optimal
420
420
421 generaldelta
421 generaldelta
422 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
422 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
423
423
424 sparserevlog
424 sparserevlog
425 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
425 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
426
426
427 repository lacks features used by the default config options:
427 repository lacks features used by the default config options:
428
428
429 dotencode
429 dotencode
430 storage of filenames beginning with a period or space may not work correctly
430 storage of filenames beginning with a period or space may not work correctly
431
431
432
432
433 performing an upgrade with "--run" will make the following changes:
433 performing an upgrade with "--run" will make the following changes:
434
434
435 requirements
435 requirements
436 preserved: revlogv1, store
436 preserved: revlogv1, store
437 added: fncache, generaldelta, sparserevlog
437 added: fncache, generaldelta, sparserevlog
438
438
439 fncache
439 fncache
440 repository will be more resilient to storing certain paths and performance of certain operations should be improved
440 repository will be more resilient to storing certain paths and performance of certain operations should be improved
441
441
442 generaldelta
442 generaldelta
443 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
443 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
444
444
445 sparserevlog
445 sparserevlog
446 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
446 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
447
447
448 processed revlogs:
448 processed revlogs:
449 - all-filelogs
449 - all-filelogs
450 - changelog
450 - changelog
451 - manifest
451 - manifest
452
452
453 additional optimizations are available by specifying "--optimize <name>":
453 additional optimizations are available by specifying "--optimize <name>":
454
454
455 re-delta-parent
455 re-delta-parent
456 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
456 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
457
457
458 re-delta-multibase
458 re-delta-multibase
459 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
459 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
460
460
461 re-delta-all
461 re-delta-all
462 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
462 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
463
463
464 re-delta-fulladd
464 re-delta-fulladd
465 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
465 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
466
466
467
467
468 $ cd ..
468 $ cd ..
469
469
470 Upgrading a repository that is already modern essentially no-ops
470 Upgrading a repository that is already modern essentially no-ops
471
471
472 $ hg init modern
472 $ hg init modern
473 $ hg -R modern debugupgraderepo --run
473 $ hg -R modern debugupgraderepo --run
474 upgrade will perform the following actions:
474 upgrade will perform the following actions:
475
475
476 requirements
476 requirements
477 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
477 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
478
478
479 processed revlogs:
479 processed revlogs:
480 - all-filelogs
480 - all-filelogs
481 - changelog
481 - changelog
482 - manifest
482 - manifest
483
483
484 beginning upgrade...
484 beginning upgrade...
485 repository locked and read-only
485 repository locked and read-only
486 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
486 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
487 (it is safe to interrupt this process any time before data migration completes)
487 (it is safe to interrupt this process any time before data migration completes)
488 data fully migrated to temporary repository
488 data fully migrated to temporary repository
489 marking source repository as being upgraded; clients will be unable to read from repository
489 marking source repository as being upgraded; clients will be unable to read from repository
490 starting in-place swap of repository data
490 starting in-place swap of repository data
491 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
491 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
492 replacing store...
492 replacing store...
493 store replacement complete; repository was inconsistent for *s (glob)
493 store replacement complete; repository was inconsistent for *s (glob)
494 finalizing requirements file and making repository readable again
494 finalizing requirements file and making repository readable again
495 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
495 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
496 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
496 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
497 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
497 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
498
498
499 Upgrading a repository to generaldelta works
499 Upgrading a repository to generaldelta works
500
500
501 $ hg --config format.usegeneraldelta=false init upgradegd
501 $ hg --config format.usegeneraldelta=false init upgradegd
502 $ cd upgradegd
502 $ cd upgradegd
503 $ touch f0
503 $ touch f0
504 $ hg -q commit -A -m initial
504 $ hg -q commit -A -m initial
505 $ mkdir FooBarDirectory.d
505 $ mkdir FooBarDirectory.d
506 $ touch FooBarDirectory.d/f1
506 $ touch FooBarDirectory.d/f1
507 $ hg -q commit -A -m 'add f1'
507 $ hg -q commit -A -m 'add f1'
508 $ hg -q up -r 0
508 $ hg -q up -r 0
509 >>> from __future__ import absolute_import, print_function
509 >>> from __future__ import absolute_import, print_function
510 >>> import random
510 >>> import random
511 >>> random.seed(0) # have a reproducible content
511 >>> random.seed(0) # have a reproducible content
512 >>> with open("f2", "wb") as f:
512 >>> with open("f2", "wb") as f:
513 ... for i in range(100000):
513 ... for i in range(100000):
514 ... f.write(b"%d\n" % random.randint(1000000000, 9999999999)) and None
514 ... f.write(b"%d\n" % random.randint(1000000000, 9999999999)) and None
515 $ hg -q commit -A -m 'add f2'
515 $ hg -q commit -A -m 'add f2'
516
516
517 make sure we have a .d file
517 make sure we have a .d file
518
518
519 $ ls -d .hg/store/data/*
519 $ ls -d .hg/store/data/*
520 .hg/store/data/_foo_bar_directory.d.hg
520 .hg/store/data/_foo_bar_directory.d.hg
521 .hg/store/data/f0.i
521 .hg/store/data/f0.i
522 .hg/store/data/f2.d
522 .hg/store/data/f2.d
523 .hg/store/data/f2.i
523 .hg/store/data/f2.i
524
524
525 $ hg debugupgraderepo --run --config format.sparse-revlog=false
525 $ hg debugupgraderepo --run --config format.sparse-revlog=false
526 upgrade will perform the following actions:
526 upgrade will perform the following actions:
527
527
528 requirements
528 requirements
529 preserved: dotencode, fncache, revlogv1, store
529 preserved: dotencode, fncache, revlogv1, store
530 added: generaldelta
530 added: generaldelta
531
531
532 generaldelta
532 generaldelta
533 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
533 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
534
534
535 processed revlogs:
535 processed revlogs:
536 - all-filelogs
536 - all-filelogs
537 - changelog
537 - changelog
538 - manifest
538 - manifest
539
539
540 beginning upgrade...
540 beginning upgrade...
541 repository locked and read-only
541 repository locked and read-only
542 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
542 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
543 (it is safe to interrupt this process any time before data migration completes)
543 (it is safe to interrupt this process any time before data migration completes)
544 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
544 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
545 migrating 519 KB in store; 1.05 MB tracked data
545 migrating 519 KB in store; 1.05 MB tracked data
546 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
546 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
547 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
547 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
548 migrating 1 manifests containing 3 revisions (384 bytes in store; 238 bytes tracked data)
548 migrating 1 manifests containing 3 revisions (384 bytes in store; 238 bytes tracked data)
549 finished migrating 3 manifest revisions across 1 manifests; change in size: -17 bytes
549 finished migrating 3 manifest revisions across 1 manifests; change in size: -17 bytes
550 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
550 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
551 finished migrating 3 changelog revisions; change in size: 0 bytes
551 finished migrating 3 changelog revisions; change in size: 0 bytes
552 finished migrating 9 total revisions; total change in store size: -17 bytes
552 finished migrating 9 total revisions; total change in store size: -17 bytes
553 copying phaseroots
553 copying phaseroots
554 data fully migrated to temporary repository
554 data fully migrated to temporary repository
555 marking source repository as being upgraded; clients will be unable to read from repository
555 marking source repository as being upgraded; clients will be unable to read from repository
556 starting in-place swap of repository data
556 starting in-place swap of repository data
557 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
557 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
558 replacing store...
558 replacing store...
559 store replacement complete; repository was inconsistent for *s (glob)
559 store replacement complete; repository was inconsistent for *s (glob)
560 finalizing requirements file and making repository readable again
560 finalizing requirements file and making repository readable again
561 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
561 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
562 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
562 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
563 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
563 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
564
564
565 Original requirements backed up
565 Original requirements backed up
566
566
567 $ cat .hg/upgradebackup.*/requires
567 $ cat .hg/upgradebackup.*/requires
568 dotencode
568 dotencode
569 fncache
569 fncache
570 revlogv1
570 revlogv1
571 store
571 store
572
572
573 generaldelta added to original requirements files
573 generaldelta added to original requirements files
574
574
575 $ cat .hg/requires
575 $ cat .hg/requires
576 dotencode
576 dotencode
577 fncache
577 fncache
578 generaldelta
578 generaldelta
579 revlogv1
579 revlogv1
580 store
580 store
581
581
582 store directory has files we expect
582 store directory has files we expect
583
583
584 $ ls .hg/store
584 $ ls .hg/store
585 00changelog.i
585 00changelog.i
586 00manifest.i
586 00manifest.i
587 data
587 data
588 fncache
588 fncache
589 phaseroots
589 phaseroots
590 undo
590 undo
591 undo.backupfiles
591 undo.backupfiles
592 undo.phaseroots
592 undo.phaseroots
593
593
594 manifest should be generaldelta
594 manifest should be generaldelta
595
595
596 $ hg debugrevlog -m | grep flags
596 $ hg debugrevlog -m | grep flags
597 flags : inline, generaldelta
597 flags : inline, generaldelta
598
598
599 verify should be happy
599 verify should be happy
600
600
601 $ hg verify
601 $ hg verify
602 checking changesets
602 checking changesets
603 checking manifests
603 checking manifests
604 crosschecking files in changesets and manifests
604 crosschecking files in changesets and manifests
605 checking files
605 checking files
606 checked 3 changesets with 3 changes to 3 files
606 checked 3 changesets with 3 changes to 3 files
607
607
608 old store should be backed up
608 old store should be backed up
609
609
610 $ ls -d .hg/upgradebackup.*/
610 $ ls -d .hg/upgradebackup.*/
611 .hg/upgradebackup.*/ (glob)
611 .hg/upgradebackup.*/ (glob)
612 $ ls .hg/upgradebackup.*/store
612 $ ls .hg/upgradebackup.*/store
613 00changelog.i
613 00changelog.i
614 00manifest.i
614 00manifest.i
615 data
615 data
616 fncache
616 fncache
617 phaseroots
617 phaseroots
618 undo
618 undo
619 undo.backup.fncache
619 undo.backup.fncache
620 undo.backupfiles
620 undo.backupfiles
621 undo.phaseroots
621 undo.phaseroots
622
622
623 unless --no-backup is passed
623 unless --no-backup is passed
624
624
625 $ rm -rf .hg/upgradebackup.*/
625 $ rm -rf .hg/upgradebackup.*/
626 $ hg debugupgraderepo --run --no-backup
626 $ hg debugupgraderepo --run --no-backup
627 upgrade will perform the following actions:
627 upgrade will perform the following actions:
628
628
629 requirements
629 requirements
630 preserved: dotencode, fncache, generaldelta, revlogv1, store
630 preserved: dotencode, fncache, generaldelta, revlogv1, store
631 added: sparserevlog
631 added: sparserevlog
632
632
633 sparserevlog
633 sparserevlog
634 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
634 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
635
635
636 processed revlogs:
636 processed revlogs:
637 - all-filelogs
637 - all-filelogs
638 - changelog
638 - changelog
639 - manifest
639 - manifest
640
640
641 beginning upgrade...
641 beginning upgrade...
642 repository locked and read-only
642 repository locked and read-only
643 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
643 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
644 (it is safe to interrupt this process any time before data migration completes)
644 (it is safe to interrupt this process any time before data migration completes)
645 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
645 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
646 migrating 519 KB in store; 1.05 MB tracked data
646 migrating 519 KB in store; 1.05 MB tracked data
647 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
647 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
648 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
648 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
649 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
649 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
650 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
650 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
651 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
651 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
652 finished migrating 3 changelog revisions; change in size: 0 bytes
652 finished migrating 3 changelog revisions; change in size: 0 bytes
653 finished migrating 9 total revisions; total change in store size: 0 bytes
653 finished migrating 9 total revisions; total change in store size: 0 bytes
654 copying phaseroots
654 copying phaseroots
655 data fully migrated to temporary repository
655 data fully migrated to temporary repository
656 marking source repository as being upgraded; clients will be unable to read from repository
656 marking source repository as being upgraded; clients will be unable to read from repository
657 starting in-place swap of repository data
657 starting in-place swap of repository data
658 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
658 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
659 replacing store...
659 replacing store...
660 store replacement complete; repository was inconsistent for * (glob)
660 store replacement complete; repository was inconsistent for * (glob)
661 finalizing requirements file and making repository readable again
661 finalizing requirements file and making repository readable again
662 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
662 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
663 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
663 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
664 $ ls -1 .hg/ | grep upgradebackup
664 $ ls -1 .hg/ | grep upgradebackup
665 [1]
665 [1]
666
666
667 We can restrict optimization to some revlog:
667 We can restrict optimization to some revlog:
668
668
669 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
669 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
670 upgrade will perform the following actions:
670 upgrade will perform the following actions:
671
671
672 requirements
672 requirements
673 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
673 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
674
674
675 optimisations: re-delta-parent
675 optimisations: re-delta-parent
676
676
677 re-delta-parent
677 re-delta-parent
678 deltas within internal storage will choose a new base revision if needed
678 deltas within internal storage will choose a new base revision if needed
679
679
680 processed revlogs:
680 processed revlogs:
681 - manifest
681 - manifest
682
682
683 beginning upgrade...
683 beginning upgrade...
684 repository locked and read-only
684 repository locked and read-only
685 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
685 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
686 (it is safe to interrupt this process any time before data migration completes)
686 (it is safe to interrupt this process any time before data migration completes)
687 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
687 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
688 migrating 519 KB in store; 1.05 MB tracked data
688 migrating 519 KB in store; 1.05 MB tracked data
689 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
689 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
690 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
690 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
691 blindly copying data/f0.i containing 1 revisions
691 blindly copying data/f0.i containing 1 revisions
692 blindly copying data/f2.i containing 1 revisions
692 blindly copying data/f2.i containing 1 revisions
693 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
693 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
694 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
694 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
695 cloning 3 revisions from 00manifest.i
695 cloning 3 revisions from 00manifest.i
696 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
696 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
697 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
697 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
698 blindly copying 00changelog.i containing 3 revisions
698 blindly copying 00changelog.i containing 3 revisions
699 finished migrating 3 changelog revisions; change in size: 0 bytes
699 finished migrating 3 changelog revisions; change in size: 0 bytes
700 finished migrating 9 total revisions; total change in store size: 0 bytes
700 finished migrating 9 total revisions; total change in store size: 0 bytes
701 copying phaseroots
701 copying phaseroots
702 data fully migrated to temporary repository
702 data fully migrated to temporary repository
703 marking source repository as being upgraded; clients will be unable to read from repository
703 marking source repository as being upgraded; clients will be unable to read from repository
704 starting in-place swap of repository data
704 starting in-place swap of repository data
705 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
705 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
706 replacing store...
706 replacing store...
707 store replacement complete; repository was inconsistent for *s (glob)
707 store replacement complete; repository was inconsistent for *s (glob)
708 finalizing requirements file and making repository readable again
708 finalizing requirements file and making repository readable again
709 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
709 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
710 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
710 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
711
711
712 Check that the repo still works fine
712 Check that the repo still works fine
713
713
714 $ hg log -G --stat
714 $ hg log -G --stat
715 @ changeset: 2:76d4395f5413 (no-py3 !)
715 @ changeset: 2:76d4395f5413 (no-py3 !)
716 @ changeset: 2:fca376863211 (py3 !)
716 @ changeset: 2:fca376863211 (py3 !)
717 | tag: tip
717 | tag: tip
718 | parent: 0:ba592bf28da2
718 | parent: 0:ba592bf28da2
719 | user: test
719 | user: test
720 | date: Thu Jan 01 00:00:00 1970 +0000
720 | date: Thu Jan 01 00:00:00 1970 +0000
721 | summary: add f2
721 | summary: add f2
722 |
722 |
723 | f2 | 100000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
723 | f2 | 100000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
724 | 1 files changed, 100000 insertions(+), 0 deletions(-)
724 | 1 files changed, 100000 insertions(+), 0 deletions(-)
725 |
725 |
726 | o changeset: 1:2029ce2354e2
726 | o changeset: 1:2029ce2354e2
727 |/ user: test
727 |/ user: test
728 | date: Thu Jan 01 00:00:00 1970 +0000
728 | date: Thu Jan 01 00:00:00 1970 +0000
729 | summary: add f1
729 | summary: add f1
730 |
730 |
731 |
731 |
732 o changeset: 0:ba592bf28da2
732 o changeset: 0:ba592bf28da2
733 user: test
733 user: test
734 date: Thu Jan 01 00:00:00 1970 +0000
734 date: Thu Jan 01 00:00:00 1970 +0000
735 summary: initial
735 summary: initial
736
736
737
737
738
738
739 $ hg verify
739 $ hg verify
740 checking changesets
740 checking changesets
741 checking manifests
741 checking manifests
742 crosschecking files in changesets and manifests
742 crosschecking files in changesets and manifests
743 checking files
743 checking files
744 checked 3 changesets with 3 changes to 3 files
744 checked 3 changesets with 3 changes to 3 files
745
745
746 Check we can select negatively
746 Check we can select negatively
747
747
748 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
748 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
749 upgrade will perform the following actions:
749 upgrade will perform the following actions:
750
750
751 requirements
751 requirements
752 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
752 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
753
753
754 optimisations: re-delta-parent
754 optimisations: re-delta-parent
755
755
756 re-delta-parent
756 re-delta-parent
757 deltas within internal storage will choose a new base revision if needed
757 deltas within internal storage will choose a new base revision if needed
758
758
759 processed revlogs:
759 processed revlogs:
760 - all-filelogs
760 - all-filelogs
761 - changelog
761 - changelog
762
762
763 beginning upgrade...
763 beginning upgrade...
764 repository locked and read-only
764 repository locked and read-only
765 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
765 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
766 (it is safe to interrupt this process any time before data migration completes)
766 (it is safe to interrupt this process any time before data migration completes)
767 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
767 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
768 migrating 519 KB in store; 1.05 MB tracked data
768 migrating 519 KB in store; 1.05 MB tracked data
769 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
769 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
770 cloning 1 revisions from data/FooBarDirectory.d/f1.i
770 cloning 1 revisions from data/FooBarDirectory.d/f1.i
771 cloning 1 revisions from data/f0.i
771 cloning 1 revisions from data/f0.i
772 cloning 1 revisions from data/f2.i
772 cloning 1 revisions from data/f2.i
773 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
773 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
774 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
774 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
775 blindly copying 00manifest.i containing 3 revisions
775 blindly copying 00manifest.i containing 3 revisions
776 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
776 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
777 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
777 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
778 cloning 3 revisions from 00changelog.i
778 cloning 3 revisions from 00changelog.i
779 finished migrating 3 changelog revisions; change in size: 0 bytes
779 finished migrating 3 changelog revisions; change in size: 0 bytes
780 finished migrating 9 total revisions; total change in store size: 0 bytes
780 finished migrating 9 total revisions; total change in store size: 0 bytes
781 copying phaseroots
781 copying phaseroots
782 data fully migrated to temporary repository
782 data fully migrated to temporary repository
783 marking source repository as being upgraded; clients will be unable to read from repository
783 marking source repository as being upgraded; clients will be unable to read from repository
784 starting in-place swap of repository data
784 starting in-place swap of repository data
785 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
785 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
786 replacing store...
786 replacing store...
787 store replacement complete; repository was inconsistent for *s (glob)
787 store replacement complete; repository was inconsistent for *s (glob)
788 finalizing requirements file and making repository readable again
788 finalizing requirements file and making repository readable again
789 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
789 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
790 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
790 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
791 $ hg verify
791 $ hg verify
792 checking changesets
792 checking changesets
793 checking manifests
793 checking manifests
794 crosschecking files in changesets and manifests
794 crosschecking files in changesets and manifests
795 checking files
795 checking files
796 checked 3 changesets with 3 changes to 3 files
796 checked 3 changesets with 3 changes to 3 files
797
797
798 Check that we can select changelog only
798 Check that we can select changelog only
799
799
800 $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
800 $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
801 upgrade will perform the following actions:
801 upgrade will perform the following actions:
802
802
803 requirements
803 requirements
804 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
804 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
805
805
806 optimisations: re-delta-parent
806 optimisations: re-delta-parent
807
807
808 re-delta-parent
808 re-delta-parent
809 deltas within internal storage will choose a new base revision if needed
809 deltas within internal storage will choose a new base revision if needed
810
810
811 processed revlogs:
811 processed revlogs:
812 - changelog
812 - changelog
813
813
814 beginning upgrade...
814 beginning upgrade...
815 repository locked and read-only
815 repository locked and read-only
816 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
816 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
817 (it is safe to interrupt this process any time before data migration completes)
817 (it is safe to interrupt this process any time before data migration completes)
818 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
818 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
819 migrating 519 KB in store; 1.05 MB tracked data
819 migrating 519 KB in store; 1.05 MB tracked data
820 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
820 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
821 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
821 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
822 blindly copying data/f0.i containing 1 revisions
822 blindly copying data/f0.i containing 1 revisions
823 blindly copying data/f2.i containing 1 revisions
823 blindly copying data/f2.i containing 1 revisions
824 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
824 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
825 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
825 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
826 blindly copying 00manifest.i containing 3 revisions
826 blindly copying 00manifest.i containing 3 revisions
827 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
827 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
828 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
828 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
829 cloning 3 revisions from 00changelog.i
829 cloning 3 revisions from 00changelog.i
830 finished migrating 3 changelog revisions; change in size: 0 bytes
830 finished migrating 3 changelog revisions; change in size: 0 bytes
831 finished migrating 9 total revisions; total change in store size: 0 bytes
831 finished migrating 9 total revisions; total change in store size: 0 bytes
832 copying phaseroots
832 copying phaseroots
833 data fully migrated to temporary repository
833 data fully migrated to temporary repository
834 marking source repository as being upgraded; clients will be unable to read from repository
834 marking source repository as being upgraded; clients will be unable to read from repository
835 starting in-place swap of repository data
835 starting in-place swap of repository data
836 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
836 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
837 replacing store...
837 replacing store...
838 store replacement complete; repository was inconsistent for *s (glob)
838 store replacement complete; repository was inconsistent for *s (glob)
839 finalizing requirements file and making repository readable again
839 finalizing requirements file and making repository readable again
840 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
840 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
841 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
841 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
842 $ hg verify
842 $ hg verify
843 checking changesets
843 checking changesets
844 checking manifests
844 checking manifests
845 crosschecking files in changesets and manifests
845 crosschecking files in changesets and manifests
846 checking files
846 checking files
847 checked 3 changesets with 3 changes to 3 files
847 checked 3 changesets with 3 changes to 3 files
848
848
849 Check that we can select filelog only
849 Check that we can select filelog only
850
850
851 $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
851 $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
852 upgrade will perform the following actions:
852 upgrade will perform the following actions:
853
853
854 requirements
854 requirements
855 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
855 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
856
856
857 optimisations: re-delta-parent
857 optimisations: re-delta-parent
858
858
859 re-delta-parent
859 re-delta-parent
860 deltas within internal storage will choose a new base revision if needed
860 deltas within internal storage will choose a new base revision if needed
861
861
862 processed revlogs:
862 processed revlogs:
863 - all-filelogs
863 - all-filelogs
864
864
865 beginning upgrade...
865 beginning upgrade...
866 repository locked and read-only
866 repository locked and read-only
867 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
867 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
868 (it is safe to interrupt this process any time before data migration completes)
868 (it is safe to interrupt this process any time before data migration completes)
869 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
869 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
870 migrating 519 KB in store; 1.05 MB tracked data
870 migrating 519 KB in store; 1.05 MB tracked data
871 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
871 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
872 cloning 1 revisions from data/FooBarDirectory.d/f1.i
872 cloning 1 revisions from data/FooBarDirectory.d/f1.i
873 cloning 1 revisions from data/f0.i
873 cloning 1 revisions from data/f0.i
874 cloning 1 revisions from data/f2.i
874 cloning 1 revisions from data/f2.i
875 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
875 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
876 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
876 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
877 blindly copying 00manifest.i containing 3 revisions
877 blindly copying 00manifest.i containing 3 revisions
878 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
878 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
879 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
879 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
880 blindly copying 00changelog.i containing 3 revisions
880 blindly copying 00changelog.i containing 3 revisions
881 finished migrating 3 changelog revisions; change in size: 0 bytes
881 finished migrating 3 changelog revisions; change in size: 0 bytes
882 finished migrating 9 total revisions; total change in store size: 0 bytes
882 finished migrating 9 total revisions; total change in store size: 0 bytes
883 copying phaseroots
883 copying phaseroots
884 data fully migrated to temporary repository
884 data fully migrated to temporary repository
885 marking source repository as being upgraded; clients will be unable to read from repository
885 marking source repository as being upgraded; clients will be unable to read from repository
886 starting in-place swap of repository data
886 starting in-place swap of repository data
887 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
887 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
888 replacing store...
888 replacing store...
889 store replacement complete; repository was inconsistent for *s (glob)
889 store replacement complete; repository was inconsistent for *s (glob)
890 finalizing requirements file and making repository readable again
890 finalizing requirements file and making repository readable again
891 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
891 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
892 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
892 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
893 $ hg verify
893 $ hg verify
894 checking changesets
894 checking changesets
895 checking manifests
895 checking manifests
896 crosschecking files in changesets and manifests
896 crosschecking files in changesets and manifests
897 checking files
897 checking files
898 checked 3 changesets with 3 changes to 3 files
898 checked 3 changesets with 3 changes to 3 files
899
899
900
900
901 Check you can't skip revlog clone during important format downgrade
901 Check you can't skip revlog clone during important format downgrade
902
902
903 $ echo "[format]" > .hg/hgrc
903 $ echo "[format]" > .hg/hgrc
904 $ echo "sparse-revlog=no" >> .hg/hgrc
904 $ echo "sparse-revlog=no" >> .hg/hgrc
905 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
905 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
906 ignoring revlogs selection flags, format requirements change: sparserevlog
906 ignoring revlogs selection flags, format requirements change: sparserevlog
907 upgrade will perform the following actions:
907 upgrade will perform the following actions:
908
908
909 requirements
909 requirements
910 preserved: dotencode, fncache, generaldelta, revlogv1, store
910 preserved: dotencode, fncache, generaldelta, revlogv1, store
911 removed: sparserevlog
911 removed: sparserevlog
912
912
913 optimisations: re-delta-parent
913 optimisations: re-delta-parent
914
914
915 re-delta-parent
915 re-delta-parent
916 deltas within internal storage will choose a new base revision if needed
916 deltas within internal storage will choose a new base revision if needed
917
917
918 processed revlogs:
918 processed revlogs:
919 - all-filelogs
919 - all-filelogs
920 - changelog
920 - changelog
921 - manifest
921 - manifest
922
922
923 beginning upgrade...
923 beginning upgrade...
924 repository locked and read-only
924 repository locked and read-only
925 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
925 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
926 (it is safe to interrupt this process any time before data migration completes)
926 (it is safe to interrupt this process any time before data migration completes)
927 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
927 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
928 migrating 519 KB in store; 1.05 MB tracked data
928 migrating 519 KB in store; 1.05 MB tracked data
929 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
929 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
930 cloning 1 revisions from data/FooBarDirectory.d/f1.i
930 cloning 1 revisions from data/FooBarDirectory.d/f1.i
931 cloning 1 revisions from data/f0.i
931 cloning 1 revisions from data/f0.i
932 cloning 1 revisions from data/f2.i
932 cloning 1 revisions from data/f2.i
933 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
933 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
934 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
934 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
935 cloning 3 revisions from 00manifest.i
935 cloning 3 revisions from 00manifest.i
936 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
936 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
937 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
937 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
938 cloning 3 revisions from 00changelog.i
938 cloning 3 revisions from 00changelog.i
939 finished migrating 3 changelog revisions; change in size: 0 bytes
939 finished migrating 3 changelog revisions; change in size: 0 bytes
940 finished migrating 9 total revisions; total change in store size: 0 bytes
940 finished migrating 9 total revisions; total change in store size: 0 bytes
941 copying phaseroots
941 copying phaseroots
942 data fully migrated to temporary repository
942 data fully migrated to temporary repository
943 marking source repository as being upgraded; clients will be unable to read from repository
943 marking source repository as being upgraded; clients will be unable to read from repository
944 starting in-place swap of repository data
944 starting in-place swap of repository data
945 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
945 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
946 replacing store...
946 replacing store...
947 store replacement complete; repository was inconsistent for *s (glob)
947 store replacement complete; repository was inconsistent for *s (glob)
948 finalizing requirements file and making repository readable again
948 finalizing requirements file and making repository readable again
949 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
949 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
950 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
950 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
951 $ hg verify
951 $ hg verify
952 checking changesets
952 checking changesets
953 checking manifests
953 checking manifests
954 crosschecking files in changesets and manifests
954 crosschecking files in changesets and manifests
955 checking files
955 checking files
956 checked 3 changesets with 3 changes to 3 files
956 checked 3 changesets with 3 changes to 3 files
957
957
958 Check you can't skip revlog clone during important format upgrade
958 Check you can't skip revlog clone during important format upgrade
959
959
960 $ echo "sparse-revlog=yes" >> .hg/hgrc
960 $ echo "sparse-revlog=yes" >> .hg/hgrc
961 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
961 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
962 ignoring revlogs selection flags, format requirements change: sparserevlog
962 ignoring revlogs selection flags, format requirements change: sparserevlog
963 upgrade will perform the following actions:
963 upgrade will perform the following actions:
964
964
965 requirements
965 requirements
966 preserved: dotencode, fncache, generaldelta, revlogv1, store
966 preserved: dotencode, fncache, generaldelta, revlogv1, store
967 added: sparserevlog
967 added: sparserevlog
968
968
969 optimisations: re-delta-parent
969 optimisations: re-delta-parent
970
970
971 sparserevlog
971 sparserevlog
972 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
972 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
973
973
974 re-delta-parent
974 re-delta-parent
975 deltas within internal storage will choose a new base revision if needed
975 deltas within internal storage will choose a new base revision if needed
976
976
977 processed revlogs:
977 processed revlogs:
978 - all-filelogs
978 - all-filelogs
979 - changelog
979 - changelog
980 - manifest
980 - manifest
981
981
982 beginning upgrade...
982 beginning upgrade...
983 repository locked and read-only
983 repository locked and read-only
984 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
984 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
985 (it is safe to interrupt this process any time before data migration completes)
985 (it is safe to interrupt this process any time before data migration completes)
986 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
986 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
987 migrating 519 KB in store; 1.05 MB tracked data
987 migrating 519 KB in store; 1.05 MB tracked data
988 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
988 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
989 cloning 1 revisions from data/FooBarDirectory.d/f1.i
989 cloning 1 revisions from data/FooBarDirectory.d/f1.i
990 cloning 1 revisions from data/f0.i
990 cloning 1 revisions from data/f0.i
991 cloning 1 revisions from data/f2.i
991 cloning 1 revisions from data/f2.i
992 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
992 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
993 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
993 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
994 cloning 3 revisions from 00manifest.i
994 cloning 3 revisions from 00manifest.i
995 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
995 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
996 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
996 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
997 cloning 3 revisions from 00changelog.i
997 cloning 3 revisions from 00changelog.i
998 finished migrating 3 changelog revisions; change in size: 0 bytes
998 finished migrating 3 changelog revisions; change in size: 0 bytes
999 finished migrating 9 total revisions; total change in store size: 0 bytes
999 finished migrating 9 total revisions; total change in store size: 0 bytes
1000 copying phaseroots
1000 copying phaseroots
1001 data fully migrated to temporary repository
1001 data fully migrated to temporary repository
1002 marking source repository as being upgraded; clients will be unable to read from repository
1002 marking source repository as being upgraded; clients will be unable to read from repository
1003 starting in-place swap of repository data
1003 starting in-place swap of repository data
1004 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
1004 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
1005 replacing store...
1005 replacing store...
1006 store replacement complete; repository was inconsistent for *s (glob)
1006 store replacement complete; repository was inconsistent for *s (glob)
1007 finalizing requirements file and making repository readable again
1007 finalizing requirements file and making repository readable again
1008 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
1008 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
1009 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1009 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1010 $ hg verify
1010 $ hg verify
1011 checking changesets
1011 checking changesets
1012 checking manifests
1012 checking manifests
1013 crosschecking files in changesets and manifests
1013 crosschecking files in changesets and manifests
1014 checking files
1014 checking files
1015 checked 3 changesets with 3 changes to 3 files
1015 checked 3 changesets with 3 changes to 3 files
1016
1016
1017 $ cd ..
1017 $ cd ..
1018
1018
1019 store files with special filenames aren't encoded during copy
1019 store files with special filenames aren't encoded during copy
1020
1020
1021 $ hg init store-filenames
1021 $ hg init store-filenames
1022 $ cd store-filenames
1022 $ cd store-filenames
1023 $ touch foo
1023 $ touch foo
1024 $ hg -q commit -A -m initial
1024 $ hg -q commit -A -m initial
1025 $ touch .hg/store/.XX_special_filename
1025 $ touch .hg/store/.XX_special_filename
1026
1026
1027 $ hg debugupgraderepo --run
1027 $ hg debugupgraderepo --run
1028 upgrade will perform the following actions:
1028 upgrade will perform the following actions:
1029
1029
1030 requirements
1030 requirements
1031 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1031 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1032
1032
1033 processed revlogs:
1033 processed revlogs:
1034 - all-filelogs
1034 - all-filelogs
1035 - changelog
1035 - changelog
1036 - manifest
1036 - manifest
1037
1037
1038 beginning upgrade...
1038 beginning upgrade...
1039 repository locked and read-only
1039 repository locked and read-only
1040 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1040 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1041 (it is safe to interrupt this process any time before data migration completes)
1041 (it is safe to interrupt this process any time before data migration completes)
1042 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1042 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1043 migrating 301 bytes in store; 107 bytes tracked data
1043 migrating 301 bytes in store; 107 bytes tracked data
1044 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1044 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1045 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1045 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1046 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1046 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1047 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1047 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1048 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1048 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1049 finished migrating 1 changelog revisions; change in size: 0 bytes
1049 finished migrating 1 changelog revisions; change in size: 0 bytes
1050 finished migrating 3 total revisions; total change in store size: 0 bytes
1050 finished migrating 3 total revisions; total change in store size: 0 bytes
1051 copying .XX_special_filename
1051 copying .XX_special_filename
1052 copying phaseroots
1052 copying phaseroots
1053 data fully migrated to temporary repository
1053 data fully migrated to temporary repository
1054 marking source repository as being upgraded; clients will be unable to read from repository
1054 marking source repository as being upgraded; clients will be unable to read from repository
1055 starting in-place swap of repository data
1055 starting in-place swap of repository data
1056 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1056 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1057 replacing store...
1057 replacing store...
1058 store replacement complete; repository was inconsistent for *s (glob)
1058 store replacement complete; repository was inconsistent for *s (glob)
1059 finalizing requirements file and making repository readable again
1059 finalizing requirements file and making repository readable again
1060 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1060 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1061 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1061 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1062 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1062 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1063 $ hg debugupgraderepo --run --optimize redeltafulladd
1063 $ hg debugupgraderepo --run --optimize 're-delta-fulladd'
1064 upgrade will perform the following actions:
1064 upgrade will perform the following actions:
1065
1065
1066 requirements
1066 requirements
1067 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1067 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1068
1068
1069 optimisations: re-delta-fulladd
1069 optimisations: re-delta-fulladd
1070
1070
1071 re-delta-fulladd
1071 re-delta-fulladd
1072 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
1072 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
1073
1073
1074 processed revlogs:
1074 processed revlogs:
1075 - all-filelogs
1075 - all-filelogs
1076 - changelog
1076 - changelog
1077 - manifest
1077 - manifest
1078
1078
1079 beginning upgrade...
1079 beginning upgrade...
1080 repository locked and read-only
1080 repository locked and read-only
1081 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1081 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1082 (it is safe to interrupt this process any time before data migration completes)
1082 (it is safe to interrupt this process any time before data migration completes)
1083 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1083 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1084 migrating 301 bytes in store; 107 bytes tracked data
1084 migrating 301 bytes in store; 107 bytes tracked data
1085 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1085 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1086 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1086 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1087 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1087 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1088 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1088 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1089 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1089 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1090 finished migrating 1 changelog revisions; change in size: 0 bytes
1090 finished migrating 1 changelog revisions; change in size: 0 bytes
1091 finished migrating 3 total revisions; total change in store size: 0 bytes
1091 finished migrating 3 total revisions; total change in store size: 0 bytes
1092 copying .XX_special_filename
1092 copying .XX_special_filename
1093 copying phaseroots
1093 copying phaseroots
1094 data fully migrated to temporary repository
1094 data fully migrated to temporary repository
1095 marking source repository as being upgraded; clients will be unable to read from repository
1095 marking source repository as being upgraded; clients will be unable to read from repository
1096 starting in-place swap of repository data
1096 starting in-place swap of repository data
1097 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1097 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1098 replacing store...
1098 replacing store...
1099 store replacement complete; repository was inconsistent for *s (glob)
1099 store replacement complete; repository was inconsistent for *s (glob)
1100 finalizing requirements file and making repository readable again
1100 finalizing requirements file and making repository readable again
1101 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1101 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1102 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1102 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1103 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1103 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1104
1104
1105 fncache is valid after upgrade
1105 fncache is valid after upgrade
1106
1106
1107 $ hg debugrebuildfncache
1107 $ hg debugrebuildfncache
1108 fncache already up to date
1108 fncache already up to date
1109
1109
1110 $ cd ..
1110 $ cd ..
1111
1111
1112 Check upgrading a large file repository
1112 Check upgrading a large file repository
1113 ---------------------------------------
1113 ---------------------------------------
1114
1114
1115 $ hg init largefilesrepo
1115 $ hg init largefilesrepo
1116 $ cat << EOF >> largefilesrepo/.hg/hgrc
1116 $ cat << EOF >> largefilesrepo/.hg/hgrc
1117 > [extensions]
1117 > [extensions]
1118 > largefiles =
1118 > largefiles =
1119 > EOF
1119 > EOF
1120
1120
1121 $ cd largefilesrepo
1121 $ cd largefilesrepo
1122 $ touch foo
1122 $ touch foo
1123 $ hg add --large foo
1123 $ hg add --large foo
1124 $ hg -q commit -m initial
1124 $ hg -q commit -m initial
1125 $ cat .hg/requires
1125 $ cat .hg/requires
1126 dotencode
1126 dotencode
1127 fncache
1127 fncache
1128 generaldelta
1128 generaldelta
1129 largefiles
1129 largefiles
1130 revlogv1
1130 revlogv1
1131 sparserevlog
1131 sparserevlog
1132 store
1132 store
1133
1133
1134 $ hg debugupgraderepo --run
1134 $ hg debugupgraderepo --run
1135 upgrade will perform the following actions:
1135 upgrade will perform the following actions:
1136
1136
1137 requirements
1137 requirements
1138 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
1138 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
1139
1139
1140 processed revlogs:
1140 processed revlogs:
1141 - all-filelogs
1141 - all-filelogs
1142 - changelog
1142 - changelog
1143 - manifest
1143 - manifest
1144
1144
1145 beginning upgrade...
1145 beginning upgrade...
1146 repository locked and read-only
1146 repository locked and read-only
1147 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1147 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1148 (it is safe to interrupt this process any time before data migration completes)
1148 (it is safe to interrupt this process any time before data migration completes)
1149 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1149 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1150 migrating 355 bytes in store; 160 bytes tracked data
1150 migrating 355 bytes in store; 160 bytes tracked data
1151 migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
1151 migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
1152 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1152 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1153 migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
1153 migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
1154 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1154 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1155 migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
1155 migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
1156 finished migrating 1 changelog revisions; change in size: 0 bytes
1156 finished migrating 1 changelog revisions; change in size: 0 bytes
1157 finished migrating 3 total revisions; total change in store size: 0 bytes
1157 finished migrating 3 total revisions; total change in store size: 0 bytes
1158 copying phaseroots
1158 copying phaseroots
1159 data fully migrated to temporary repository
1159 data fully migrated to temporary repository
1160 marking source repository as being upgraded; clients will be unable to read from repository
1160 marking source repository as being upgraded; clients will be unable to read from repository
1161 starting in-place swap of repository data
1161 starting in-place swap of repository data
1162 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1162 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1163 replacing store...
1163 replacing store...
1164 store replacement complete; repository was inconsistent for *s (glob)
1164 store replacement complete; repository was inconsistent for *s (glob)
1165 finalizing requirements file and making repository readable again
1165 finalizing requirements file and making repository readable again
1166 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1166 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1167 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1167 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1168 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1168 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1169 $ cat .hg/requires
1169 $ cat .hg/requires
1170 dotencode
1170 dotencode
1171 fncache
1171 fncache
1172 generaldelta
1172 generaldelta
1173 largefiles
1173 largefiles
1174 revlogv1
1174 revlogv1
1175 sparserevlog
1175 sparserevlog
1176 store
1176 store
1177
1177
1178 $ cat << EOF >> .hg/hgrc
1178 $ cat << EOF >> .hg/hgrc
1179 > [extensions]
1179 > [extensions]
1180 > lfs =
1180 > lfs =
1181 > [lfs]
1181 > [lfs]
1182 > threshold = 10
1182 > threshold = 10
1183 > EOF
1183 > EOF
1184 $ echo '123456789012345' > lfs.bin
1184 $ echo '123456789012345' > lfs.bin
1185 $ hg ci -Am 'lfs.bin'
1185 $ hg ci -Am 'lfs.bin'
1186 adding lfs.bin
1186 adding lfs.bin
1187 $ grep lfs .hg/requires
1187 $ grep lfs .hg/requires
1188 lfs
1188 lfs
1189 $ find .hg/store/lfs -type f
1189 $ find .hg/store/lfs -type f
1190 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1190 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1191
1191
1192 $ hg debugupgraderepo --run
1192 $ hg debugupgraderepo --run
1193 upgrade will perform the following actions:
1193 upgrade will perform the following actions:
1194
1194
1195 requirements
1195 requirements
1196 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
1196 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
1197
1197
1198 processed revlogs:
1198 processed revlogs:
1199 - all-filelogs
1199 - all-filelogs
1200 - changelog
1200 - changelog
1201 - manifest
1201 - manifest
1202
1202
1203 beginning upgrade...
1203 beginning upgrade...
1204 repository locked and read-only
1204 repository locked and read-only
1205 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1205 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1206 (it is safe to interrupt this process any time before data migration completes)
1206 (it is safe to interrupt this process any time before data migration completes)
1207 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
1207 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
1208 migrating 801 bytes in store; 467 bytes tracked data
1208 migrating 801 bytes in store; 467 bytes tracked data
1209 migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
1209 migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
1210 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
1210 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
1211 migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
1211 migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
1212 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
1212 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
1213 migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
1213 migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
1214 finished migrating 2 changelog revisions; change in size: 0 bytes
1214 finished migrating 2 changelog revisions; change in size: 0 bytes
1215 finished migrating 6 total revisions; total change in store size: 0 bytes
1215 finished migrating 6 total revisions; total change in store size: 0 bytes
1216 copying phaseroots
1216 copying phaseroots
1217 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1217 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1218 data fully migrated to temporary repository
1218 data fully migrated to temporary repository
1219 marking source repository as being upgraded; clients will be unable to read from repository
1219 marking source repository as being upgraded; clients will be unable to read from repository
1220 starting in-place swap of repository data
1220 starting in-place swap of repository data
1221 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1221 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1222 replacing store...
1222 replacing store...
1223 store replacement complete; repository was inconsistent for *s (glob)
1223 store replacement complete; repository was inconsistent for *s (glob)
1224 finalizing requirements file and making repository readable again
1224 finalizing requirements file and making repository readable again
1225 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1225 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1226 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1226 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1227 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1227 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1228
1228
1229 $ grep lfs .hg/requires
1229 $ grep lfs .hg/requires
1230 lfs
1230 lfs
1231 $ find .hg/store/lfs -type f
1231 $ find .hg/store/lfs -type f
1232 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1232 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1233 $ hg verify
1233 $ hg verify
1234 checking changesets
1234 checking changesets
1235 checking manifests
1235 checking manifests
1236 crosschecking files in changesets and manifests
1236 crosschecking files in changesets and manifests
1237 checking files
1237 checking files
1238 checked 2 changesets with 2 changes to 2 files
1238 checked 2 changesets with 2 changes to 2 files
1239 $ hg debugdata lfs.bin 0
1239 $ hg debugdata lfs.bin 0
1240 version https://git-lfs.github.com/spec/v1
1240 version https://git-lfs.github.com/spec/v1
1241 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1241 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1242 size 16
1242 size 16
1243 x-is-binary 0
1243 x-is-binary 0
1244
1244
1245 $ cd ..
1245 $ cd ..
1246
1246
1247 repository config is taken in account
1247 repository config is taken in account
1248 -------------------------------------
1248 -------------------------------------
1249
1249
1250 $ cat << EOF >> $HGRCPATH
1250 $ cat << EOF >> $HGRCPATH
1251 > [format]
1251 > [format]
1252 > maxchainlen = 1
1252 > maxchainlen = 1
1253 > EOF
1253 > EOF
1254
1254
1255 $ hg init localconfig
1255 $ hg init localconfig
1256 $ cd localconfig
1256 $ cd localconfig
1257 $ cat << EOF > file
1257 $ cat << EOF > file
1258 > some content
1258 > some content
1259 > with some length
1259 > with some length
1260 > to make sure we get a delta
1260 > to make sure we get a delta
1261 > after changes
1261 > after changes
1262 > very long
1262 > very long
1263 > very long
1263 > very long
1264 > very long
1264 > very long
1265 > very long
1265 > very long
1266 > very long
1266 > very long
1267 > very long
1267 > very long
1268 > very long
1268 > very long
1269 > very long
1269 > very long
1270 > very long
1270 > very long
1271 > very long
1271 > very long
1272 > very long
1272 > very long
1273 > EOF
1273 > EOF
1274 $ hg -q commit -A -m A
1274 $ hg -q commit -A -m A
1275 $ echo "new line" >> file
1275 $ echo "new line" >> file
1276 $ hg -q commit -m B
1276 $ hg -q commit -m B
1277 $ echo "new line" >> file
1277 $ echo "new line" >> file
1278 $ hg -q commit -m C
1278 $ hg -q commit -m C
1279
1279
1280 $ cat << EOF >> .hg/hgrc
1280 $ cat << EOF >> .hg/hgrc
1281 > [format]
1281 > [format]
1282 > maxchainlen = 9001
1282 > maxchainlen = 9001
1283 > EOF
1283 > EOF
1284 $ hg config format
1284 $ hg config format
1285 format.maxchainlen=9001
1285 format.maxchainlen=9001
1286 $ hg debugdeltachain file
1286 $ hg debugdeltachain file
1287 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1287 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1288 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1288 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1289 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1289 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1290 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
1290 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
1291
1291
1292 $ hg debugupgraderepo --run --optimize redeltaall
1292 $ hg debugupgraderepo --run --optimize 're-delta-all'
1293 upgrade will perform the following actions:
1293 upgrade will perform the following actions:
1294
1294
1295 requirements
1295 requirements
1296 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1296 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1297
1297
1298 optimisations: re-delta-all
1298 optimisations: re-delta-all
1299
1299
1300 re-delta-all
1300 re-delta-all
1301 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
1301 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
1302
1302
1303 processed revlogs:
1303 processed revlogs:
1304 - all-filelogs
1304 - all-filelogs
1305 - changelog
1305 - changelog
1306 - manifest
1306 - manifest
1307
1307
1308 beginning upgrade...
1308 beginning upgrade...
1309 repository locked and read-only
1309 repository locked and read-only
1310 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
1310 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
1311 (it is safe to interrupt this process any time before data migration completes)
1311 (it is safe to interrupt this process any time before data migration completes)
1312 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1312 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1313 migrating 1019 bytes in store; 882 bytes tracked data
1313 migrating 1019 bytes in store; 882 bytes tracked data
1314 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
1314 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
1315 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
1315 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
1316 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
1316 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
1317 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1317 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1318 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
1318 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
1319 finished migrating 3 changelog revisions; change in size: 0 bytes
1319 finished migrating 3 changelog revisions; change in size: 0 bytes
1320 finished migrating 9 total revisions; total change in store size: -9 bytes
1320 finished migrating 9 total revisions; total change in store size: -9 bytes
1321 copying phaseroots
1321 copying phaseroots
1322 data fully migrated to temporary repository
1322 data fully migrated to temporary repository
1323 marking source repository as being upgraded; clients will be unable to read from repository
1323 marking source repository as being upgraded; clients will be unable to read from repository
1324 starting in-place swap of repository data
1324 starting in-place swap of repository data
1325 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1325 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1326 replacing store...
1326 replacing store...
1327 store replacement complete; repository was inconsistent for *s (glob)
1327 store replacement complete; repository was inconsistent for *s (glob)
1328 finalizing requirements file and making repository readable again
1328 finalizing requirements file and making repository readable again
1329 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
1329 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
1330 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1330 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1331 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1331 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1332 $ hg debugdeltachain file
1332 $ hg debugdeltachain file
1333 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1333 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1334 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1334 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1335 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1335 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1336 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
1336 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
1337 $ cd ..
1337 $ cd ..
1338
1338
1339 $ cat << EOF >> $HGRCPATH
1339 $ cat << EOF >> $HGRCPATH
1340 > [format]
1340 > [format]
1341 > maxchainlen = 9001
1341 > maxchainlen = 9001
1342 > EOF
1342 > EOF
1343
1343
1344 Check upgrading a sparse-revlog repository
1344 Check upgrading a sparse-revlog repository
1345 ---------------------------------------
1345 ---------------------------------------
1346
1346
1347 $ hg init sparserevlogrepo --config format.sparse-revlog=no
1347 $ hg init sparserevlogrepo --config format.sparse-revlog=no
1348 $ cd sparserevlogrepo
1348 $ cd sparserevlogrepo
1349 $ touch foo
1349 $ touch foo
1350 $ hg add foo
1350 $ hg add foo
1351 $ hg -q commit -m "foo"
1351 $ hg -q commit -m "foo"
1352 $ cat .hg/requires
1352 $ cat .hg/requires
1353 dotencode
1353 dotencode
1354 fncache
1354 fncache
1355 generaldelta
1355 generaldelta
1356 revlogv1
1356 revlogv1
1357 store
1357 store
1358
1358
1359 Check that we can add the sparse-revlog format requirement
1359 Check that we can add the sparse-revlog format requirement
1360 $ hg --config format.sparse-revlog=yes debugupgraderepo --run --quiet
1360 $ hg --config format.sparse-revlog=yes debugupgraderepo --run --quiet
1361 upgrade will perform the following actions:
1361 upgrade will perform the following actions:
1362
1362
1363 requirements
1363 requirements
1364 preserved: dotencode, fncache, generaldelta, revlogv1, store
1364 preserved: dotencode, fncache, generaldelta, revlogv1, store
1365 added: sparserevlog
1365 added: sparserevlog
1366
1366
1367 processed revlogs:
1367 processed revlogs:
1368 - all-filelogs
1368 - all-filelogs
1369 - changelog
1369 - changelog
1370 - manifest
1370 - manifest
1371
1371
1372 $ cat .hg/requires
1372 $ cat .hg/requires
1373 dotencode
1373 dotencode
1374 fncache
1374 fncache
1375 generaldelta
1375 generaldelta
1376 revlogv1
1376 revlogv1
1377 sparserevlog
1377 sparserevlog
1378 store
1378 store
1379
1379
1380 Check that we can remove the sparse-revlog format requirement
1380 Check that we can remove the sparse-revlog format requirement
1381 $ hg --config format.sparse-revlog=no debugupgraderepo --run --quiet
1381 $ hg --config format.sparse-revlog=no debugupgraderepo --run --quiet
1382 upgrade will perform the following actions:
1382 upgrade will perform the following actions:
1383
1383
1384 requirements
1384 requirements
1385 preserved: dotencode, fncache, generaldelta, revlogv1, store
1385 preserved: dotencode, fncache, generaldelta, revlogv1, store
1386 removed: sparserevlog
1386 removed: sparserevlog
1387
1387
1388 processed revlogs:
1388 processed revlogs:
1389 - all-filelogs
1389 - all-filelogs
1390 - changelog
1390 - changelog
1391 - manifest
1391 - manifest
1392
1392
1393 $ cat .hg/requires
1393 $ cat .hg/requires
1394 dotencode
1394 dotencode
1395 fncache
1395 fncache
1396 generaldelta
1396 generaldelta
1397 revlogv1
1397 revlogv1
1398 store
1398 store
1399
1399
1400 #if zstd
1400 #if zstd
1401
1401
1402 Check upgrading to a zstd revlog
1402 Check upgrading to a zstd revlog
1403 --------------------------------
1403 --------------------------------
1404
1404
1405 upgrade
1405 upgrade
1406
1406
1407 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup --quiet
1407 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup --quiet
1408 upgrade will perform the following actions:
1408 upgrade will perform the following actions:
1409
1409
1410 requirements
1410 requirements
1411 preserved: dotencode, fncache, generaldelta, revlogv1, store
1411 preserved: dotencode, fncache, generaldelta, revlogv1, store
1412 added: revlog-compression-zstd, sparserevlog
1412 added: revlog-compression-zstd, sparserevlog
1413
1413
1414 processed revlogs:
1414 processed revlogs:
1415 - all-filelogs
1415 - all-filelogs
1416 - changelog
1416 - changelog
1417 - manifest
1417 - manifest
1418
1418
1419 $ hg debugformat -v
1419 $ hg debugformat -v
1420 format-variant repo config default
1420 format-variant repo config default
1421 fncache: yes yes yes
1421 fncache: yes yes yes
1422 dotencode: yes yes yes
1422 dotencode: yes yes yes
1423 generaldelta: yes yes yes
1423 generaldelta: yes yes yes
1424 exp-sharesafe: no no no
1424 exp-sharesafe: no no no
1425 sparserevlog: yes yes yes
1425 sparserevlog: yes yes yes
1426 sidedata: no no no
1426 sidedata: no no no
1427 persistent-nodemap: no no no
1427 persistent-nodemap: no no no
1428 copies-sdc: no no no
1428 copies-sdc: no no no
1429 plain-cl-delta: yes yes yes
1429 plain-cl-delta: yes yes yes
1430 compression: zstd zlib zlib
1430 compression: zstd zlib zlib
1431 compression-level: default default default
1431 compression-level: default default default
1432 $ cat .hg/requires
1432 $ cat .hg/requires
1433 dotencode
1433 dotencode
1434 fncache
1434 fncache
1435 generaldelta
1435 generaldelta
1436 revlog-compression-zstd
1436 revlog-compression-zstd
1437 revlogv1
1437 revlogv1
1438 sparserevlog
1438 sparserevlog
1439 store
1439 store
1440
1440
1441 downgrade
1441 downgrade
1442
1442
1443 $ hg debugupgraderepo --run --no-backup --quiet
1443 $ hg debugupgraderepo --run --no-backup --quiet
1444 upgrade will perform the following actions:
1444 upgrade will perform the following actions:
1445
1445
1446 requirements
1446 requirements
1447 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1447 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1448 removed: revlog-compression-zstd
1448 removed: revlog-compression-zstd
1449
1449
1450 processed revlogs:
1450 processed revlogs:
1451 - all-filelogs
1451 - all-filelogs
1452 - changelog
1452 - changelog
1453 - manifest
1453 - manifest
1454
1454
1455 $ hg debugformat -v
1455 $ hg debugformat -v
1456 format-variant repo config default
1456 format-variant repo config default
1457 fncache: yes yes yes
1457 fncache: yes yes yes
1458 dotencode: yes yes yes
1458 dotencode: yes yes yes
1459 generaldelta: yes yes yes
1459 generaldelta: yes yes yes
1460 exp-sharesafe: no no no
1460 exp-sharesafe: no no no
1461 sparserevlog: yes yes yes
1461 sparserevlog: yes yes yes
1462 sidedata: no no no
1462 sidedata: no no no
1463 persistent-nodemap: no no no
1463 persistent-nodemap: no no no
1464 copies-sdc: no no no
1464 copies-sdc: no no no
1465 plain-cl-delta: yes yes yes
1465 plain-cl-delta: yes yes yes
1466 compression: zlib zlib zlib
1466 compression: zlib zlib zlib
1467 compression-level: default default default
1467 compression-level: default default default
1468 $ cat .hg/requires
1468 $ cat .hg/requires
1469 dotencode
1469 dotencode
1470 fncache
1470 fncache
1471 generaldelta
1471 generaldelta
1472 revlogv1
1472 revlogv1
1473 sparserevlog
1473 sparserevlog
1474 store
1474 store
1475
1475
1476 upgrade from hgrc
1476 upgrade from hgrc
1477
1477
1478 $ cat >> .hg/hgrc << EOF
1478 $ cat >> .hg/hgrc << EOF
1479 > [format]
1479 > [format]
1480 > revlog-compression=zstd
1480 > revlog-compression=zstd
1481 > EOF
1481 > EOF
1482 $ hg debugupgraderepo --run --no-backup --quiet
1482 $ hg debugupgraderepo --run --no-backup --quiet
1483 upgrade will perform the following actions:
1483 upgrade will perform the following actions:
1484
1484
1485 requirements
1485 requirements
1486 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1486 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1487 added: revlog-compression-zstd
1487 added: revlog-compression-zstd
1488
1488
1489 processed revlogs:
1489 processed revlogs:
1490 - all-filelogs
1490 - all-filelogs
1491 - changelog
1491 - changelog
1492 - manifest
1492 - manifest
1493
1493
1494 $ hg debugformat -v
1494 $ hg debugformat -v
1495 format-variant repo config default
1495 format-variant repo config default
1496 fncache: yes yes yes
1496 fncache: yes yes yes
1497 dotencode: yes yes yes
1497 dotencode: yes yes yes
1498 generaldelta: yes yes yes
1498 generaldelta: yes yes yes
1499 exp-sharesafe: no no no
1499 exp-sharesafe: no no no
1500 sparserevlog: yes yes yes
1500 sparserevlog: yes yes yes
1501 sidedata: no no no
1501 sidedata: no no no
1502 persistent-nodemap: no no no
1502 persistent-nodemap: no no no
1503 copies-sdc: no no no
1503 copies-sdc: no no no
1504 plain-cl-delta: yes yes yes
1504 plain-cl-delta: yes yes yes
1505 compression: zstd zstd zlib
1505 compression: zstd zstd zlib
1506 compression-level: default default default
1506 compression-level: default default default
1507 $ cat .hg/requires
1507 $ cat .hg/requires
1508 dotencode
1508 dotencode
1509 fncache
1509 fncache
1510 generaldelta
1510 generaldelta
1511 revlog-compression-zstd
1511 revlog-compression-zstd
1512 revlogv1
1512 revlogv1
1513 sparserevlog
1513 sparserevlog
1514 store
1514 store
1515
1515
1516 #endif
1516 #endif
1517
1517
1518 Check upgrading to a side-data revlog
1518 Check upgrading to a side-data revlog
1519 -------------------------------------
1519 -------------------------------------
1520
1520
1521 upgrade
1521 upgrade
1522
1522
1523 $ hg --config format.exp-use-side-data=yes debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" --quiet
1523 $ hg --config format.exp-use-side-data=yes debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" --quiet
1524 upgrade will perform the following actions:
1524 upgrade will perform the following actions:
1525
1525
1526 requirements
1526 requirements
1527 preserved: dotencode, fncache, generaldelta, revlogv1, store (no-zstd !)
1527 preserved: dotencode, fncache, generaldelta, revlogv1, store (no-zstd !)
1528 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
1528 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
1529 added: exp-sidedata-flag (zstd !)
1529 added: exp-sidedata-flag (zstd !)
1530 added: exp-sidedata-flag, sparserevlog (no-zstd !)
1530 added: exp-sidedata-flag, sparserevlog (no-zstd !)
1531
1531
1532 processed revlogs:
1532 processed revlogs:
1533 - all-filelogs
1533 - all-filelogs
1534 - changelog
1534 - changelog
1535 - manifest
1535 - manifest
1536
1536
1537 $ hg debugformat -v
1537 $ hg debugformat -v
1538 format-variant repo config default
1538 format-variant repo config default
1539 fncache: yes yes yes
1539 fncache: yes yes yes
1540 dotencode: yes yes yes
1540 dotencode: yes yes yes
1541 generaldelta: yes yes yes
1541 generaldelta: yes yes yes
1542 exp-sharesafe: no no no
1542 exp-sharesafe: no no no
1543 sparserevlog: yes yes yes
1543 sparserevlog: yes yes yes
1544 sidedata: yes no no
1544 sidedata: yes no no
1545 persistent-nodemap: no no no
1545 persistent-nodemap: no no no
1546 copies-sdc: no no no
1546 copies-sdc: no no no
1547 plain-cl-delta: yes yes yes
1547 plain-cl-delta: yes yes yes
1548 compression: zlib zlib zlib (no-zstd !)
1548 compression: zlib zlib zlib (no-zstd !)
1549 compression: zstd zstd zlib (zstd !)
1549 compression: zstd zstd zlib (zstd !)
1550 compression-level: default default default
1550 compression-level: default default default
1551 $ cat .hg/requires
1551 $ cat .hg/requires
1552 dotencode
1552 dotencode
1553 exp-sidedata-flag
1553 exp-sidedata-flag
1554 fncache
1554 fncache
1555 generaldelta
1555 generaldelta
1556 revlog-compression-zstd (zstd !)
1556 revlog-compression-zstd (zstd !)
1557 revlogv1
1557 revlogv1
1558 sparserevlog
1558 sparserevlog
1559 store
1559 store
1560 $ hg debugsidedata -c 0
1560 $ hg debugsidedata -c 0
1561 2 sidedata entries
1561 2 sidedata entries
1562 entry-0001 size 4
1562 entry-0001 size 4
1563 entry-0002 size 32
1563 entry-0002 size 32
1564
1564
1565 downgrade
1565 downgrade
1566
1566
1567 $ hg debugupgraderepo --config format.exp-use-side-data=no --run --no-backup --quiet
1567 $ hg debugupgraderepo --config format.exp-use-side-data=no --run --no-backup --quiet
1568 upgrade will perform the following actions:
1568 upgrade will perform the following actions:
1569
1569
1570 requirements
1570 requirements
1571 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
1571 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
1572 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
1572 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
1573 removed: exp-sidedata-flag
1573 removed: exp-sidedata-flag
1574
1574
1575 processed revlogs:
1575 processed revlogs:
1576 - all-filelogs
1576 - all-filelogs
1577 - changelog
1577 - changelog
1578 - manifest
1578 - manifest
1579
1579
1580 $ hg debugformat -v
1580 $ hg debugformat -v
1581 format-variant repo config default
1581 format-variant repo config default
1582 fncache: yes yes yes
1582 fncache: yes yes yes
1583 dotencode: yes yes yes
1583 dotencode: yes yes yes
1584 generaldelta: yes yes yes
1584 generaldelta: yes yes yes
1585 exp-sharesafe: no no no
1585 exp-sharesafe: no no no
1586 sparserevlog: yes yes yes
1586 sparserevlog: yes yes yes
1587 sidedata: no no no
1587 sidedata: no no no
1588 persistent-nodemap: no no no
1588 persistent-nodemap: no no no
1589 copies-sdc: no no no
1589 copies-sdc: no no no
1590 plain-cl-delta: yes yes yes
1590 plain-cl-delta: yes yes yes
1591 compression: zlib zlib zlib (no-zstd !)
1591 compression: zlib zlib zlib (no-zstd !)
1592 compression: zstd zstd zlib (zstd !)
1592 compression: zstd zstd zlib (zstd !)
1593 compression-level: default default default
1593 compression-level: default default default
1594 $ cat .hg/requires
1594 $ cat .hg/requires
1595 dotencode
1595 dotencode
1596 fncache
1596 fncache
1597 generaldelta
1597 generaldelta
1598 revlog-compression-zstd (zstd !)
1598 revlog-compression-zstd (zstd !)
1599 revlogv1
1599 revlogv1
1600 sparserevlog
1600 sparserevlog
1601 store
1601 store
1602 $ hg debugsidedata -c 0
1602 $ hg debugsidedata -c 0
1603
1603
1604 upgrade from hgrc
1604 upgrade from hgrc
1605
1605
1606 $ cat >> .hg/hgrc << EOF
1606 $ cat >> .hg/hgrc << EOF
1607 > [format]
1607 > [format]
1608 > exp-use-side-data=yes
1608 > exp-use-side-data=yes
1609 > EOF
1609 > EOF
1610 $ hg debugupgraderepo --run --no-backup --quiet
1610 $ hg debugupgraderepo --run --no-backup --quiet
1611 upgrade will perform the following actions:
1611 upgrade will perform the following actions:
1612
1612
1613 requirements
1613 requirements
1614 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
1614 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
1615 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
1615 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
1616 added: exp-sidedata-flag
1616 added: exp-sidedata-flag
1617
1617
1618 processed revlogs:
1618 processed revlogs:
1619 - all-filelogs
1619 - all-filelogs
1620 - changelog
1620 - changelog
1621 - manifest
1621 - manifest
1622
1622
1623 $ hg debugformat -v
1623 $ hg debugformat -v
1624 format-variant repo config default
1624 format-variant repo config default
1625 fncache: yes yes yes
1625 fncache: yes yes yes
1626 dotencode: yes yes yes
1626 dotencode: yes yes yes
1627 generaldelta: yes yes yes
1627 generaldelta: yes yes yes
1628 exp-sharesafe: no no no
1628 exp-sharesafe: no no no
1629 sparserevlog: yes yes yes
1629 sparserevlog: yes yes yes
1630 sidedata: yes yes no
1630 sidedata: yes yes no
1631 persistent-nodemap: no no no
1631 persistent-nodemap: no no no
1632 copies-sdc: no no no
1632 copies-sdc: no no no
1633 plain-cl-delta: yes yes yes
1633 plain-cl-delta: yes yes yes
1634 compression: zlib zlib zlib (no-zstd !)
1634 compression: zlib zlib zlib (no-zstd !)
1635 compression: zstd zstd zlib (zstd !)
1635 compression: zstd zstd zlib (zstd !)
1636 compression-level: default default default
1636 compression-level: default default default
1637 $ cat .hg/requires
1637 $ cat .hg/requires
1638 dotencode
1638 dotencode
1639 exp-sidedata-flag
1639 exp-sidedata-flag
1640 fncache
1640 fncache
1641 generaldelta
1641 generaldelta
1642 revlog-compression-zstd (zstd !)
1642 revlog-compression-zstd (zstd !)
1643 revlogv1
1643 revlogv1
1644 sparserevlog
1644 sparserevlog
1645 store
1645 store
1646 $ hg debugsidedata -c 0
1646 $ hg debugsidedata -c 0
General Comments 0
You need to be logged in to leave comments. Login now