##// END OF EJS Templates
debugupgraderepo: add a --no-backup mode...
Boris Feld -
r41121:a59a7472 default
parent child Browse files
Show More
@@ -1,3391 +1,3393 b''
1 # debugcommands.py - command processing for debug* commands
1 # debugcommands.py - command processing for debug* commands
2 #
2 #
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import codecs
10 import codecs
11 import collections
11 import collections
12 import difflib
12 import difflib
13 import errno
13 import errno
14 import operator
14 import operator
15 import os
15 import os
16 import random
16 import random
17 import re
17 import re
18 import socket
18 import socket
19 import ssl
19 import ssl
20 import stat
20 import stat
21 import string
21 import string
22 import subprocess
22 import subprocess
23 import sys
23 import sys
24 import time
24 import time
25
25
26 from .i18n import _
26 from .i18n import _
27 from .node import (
27 from .node import (
28 bin,
28 bin,
29 hex,
29 hex,
30 nullhex,
30 nullhex,
31 nullid,
31 nullid,
32 nullrev,
32 nullrev,
33 short,
33 short,
34 )
34 )
35 from . import (
35 from . import (
36 bundle2,
36 bundle2,
37 changegroup,
37 changegroup,
38 cmdutil,
38 cmdutil,
39 color,
39 color,
40 context,
40 context,
41 dagparser,
41 dagparser,
42 encoding,
42 encoding,
43 error,
43 error,
44 exchange,
44 exchange,
45 extensions,
45 extensions,
46 filemerge,
46 filemerge,
47 filesetlang,
47 filesetlang,
48 formatter,
48 formatter,
49 hg,
49 hg,
50 httppeer,
50 httppeer,
51 localrepo,
51 localrepo,
52 lock as lockmod,
52 lock as lockmod,
53 logcmdutil,
53 logcmdutil,
54 merge as mergemod,
54 merge as mergemod,
55 obsolete,
55 obsolete,
56 obsutil,
56 obsutil,
57 phases,
57 phases,
58 policy,
58 policy,
59 pvec,
59 pvec,
60 pycompat,
60 pycompat,
61 registrar,
61 registrar,
62 repair,
62 repair,
63 revlog,
63 revlog,
64 revset,
64 revset,
65 revsetlang,
65 revsetlang,
66 scmutil,
66 scmutil,
67 setdiscovery,
67 setdiscovery,
68 simplemerge,
68 simplemerge,
69 sshpeer,
69 sshpeer,
70 sslutil,
70 sslutil,
71 streamclone,
71 streamclone,
72 templater,
72 templater,
73 treediscovery,
73 treediscovery,
74 upgrade,
74 upgrade,
75 url as urlmod,
75 url as urlmod,
76 util,
76 util,
77 vfs as vfsmod,
77 vfs as vfsmod,
78 wireprotoframing,
78 wireprotoframing,
79 wireprotoserver,
79 wireprotoserver,
80 wireprotov2peer,
80 wireprotov2peer,
81 )
81 )
82 from .utils import (
82 from .utils import (
83 cborutil,
83 cborutil,
84 dateutil,
84 dateutil,
85 procutil,
85 procutil,
86 stringutil,
86 stringutil,
87 )
87 )
88
88
89 from .revlogutils import (
89 from .revlogutils import (
90 deltas as deltautil
90 deltas as deltautil
91 )
91 )
92
92
93 release = lockmod.release
93 release = lockmod.release
94
94
95 command = registrar.command()
95 command = registrar.command()
96
96
@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        # explicit index file given: open it as a standalone revlog, no
        # repository required (optionalrepo=True above)
        index, rev1, rev2 = args
        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        # two args: resolve both revisions against the current repo's
        # changelog, which requires actually being inside a repository
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    # print the common ancestor as "rev:hexnode"
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (r.rev(a), hex(a)))
115
115
@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file

    Opens FILE, parses it as a bundle and applies it to the current
    repository.
    """
    # Use a context manager so the bundle file is always closed; the
    # original leaked the handle returned by hg.openpath.  This matches
    # the idiom already used by debugbundle in this file.
    with hg.openpath(ui, fname) as f:
        gen = exchange.readbundle(ui, f, fname)
        gen.apply(repo)
122
122
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
     ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
     ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    # only allowed to run in a brand-new (empty) repository
    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # determine number of revs in DAG (first parse pass, for the progress bar
    # total and for sizing the mergeable file below)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = ['%d' % i
                              for i in pycompat.xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []
    progress = ui.makeprogress(_('building'), unit=_('revisions'),
                               total=total)
    with progress, repo.wlock(), repo.lock(), repo.transaction("builddag"):
        # NOTE: 'type' and 'id' shadow builtins; kept to match the
        # surrounding codebase's existing style.
        at = -1                 # rev number of the last node committed
        atbranch = 'default'    # branch applied to subsequently created nodes
        nodeids = []            # maps DAG rev number -> committed node id
        id = 0
        progress.update(id)
        # second parse pass: actually create a commit per 'n' event
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # merge: three-way merge the file from both parents
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    # stamp this rev's line so every rev changes the file
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    # single file rewritten completely at every rev
                    fn = "of"
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id

                if new_file:
                    # one brand-new file per rev ...
                    fn = "nf%i" % id
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id
                    if len(ps) > 1:
                        # ... and on merges, carry over p2's nf* files so
                        # they survive the merge commit
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    # memctx callback: serve file content collected above
                    if path in filecontent:
                        return context.memfilectx(repo, cx, path,
                                                  filecontent[path])
                    return None

                # translate DAG parent rev numbers into node ids
                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                # local tag event: remember it, written out after the loop
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                # branch annotation: affects all following 'n' events
                ui.note(('branch %s\n' % data))
                atbranch = data
            progress.update(id)

        if tags:
            repo.vfs.write("localtags", "".join(tags))
270
270
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """dump the contents of changegroup 'gen'

    With 'all' set, prints every delta (id, parents, cset, delta base and
    delta length) for the changelog, the manifest and each filelog;
    otherwise only the changelog node ids are printed.  'indent' prefixes
    every output line (used when nested inside bundle2 part output).
    """
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            # print one section header plus one line per delta in it
            ui.write("\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write("%s%s %s %s %s %s %d\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))

        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        # filelog sections repeat until an empty header dict is returned
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        chunkdata = gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write("%s%s\n" % (indent_string, hex(node)))
299
299
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = ' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # marker format newer than this client understands: report and bail
        msg = "%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = "%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        # reuse the debugobsolete formatter so output matches that command
        fm = ui.formatter('debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()
322
322
def _debugphaseheads(ui, data, indent=0):
    """display the phase heads contained in 'data'

    Decodes the binary phase-heads payload and prints one
    "<hexnode> <phasename>" line per head, prefixed by 'indent' spaces.
    """
    indent_string = ' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write(indent_string)
            ui.write('%s %s\n' % (hex(head), phases.phasenames[phase]))
331
331
def _quasirepr(thing):
    """return a stable, repr-like bytes rendering of 'thing'

    Mapping types are rendered with sorted keys so the output is
    deterministic; everything else falls through to repr().
    """
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        return '{%s}' % (
            b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing)))
    return pycompat.bytestr(repr(thing))
337
337
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
    # optional filter: only show parts whose type was requested
    parttypes = opts.get(r'part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = '%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        # known part payloads get a detailed, indented dump unless --quiet
        if part.type == 'changegroup':
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == 'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == 'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)
360
360
@command('debugbundle',
    [('a', 'all', None, _('show all details')),
     ('', 'part-type', [], _('show only the named part type')),
     ('', 'spec', None, _('print the bundlespec of the bundle'))],
    _('FILE'),
    norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            # --spec: only print the bundlespec string, nothing else
            spec = exchange.getbundlespec(ui, f)
            ui.write('%s\n' % spec)
            return

        # dispatch on bundle format: bundle2 gets its own dumper
        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
379
379
@command('debugcapabilities',
    [], _('PATH'),
    norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    caps = peer.capabilities()
    ui.write(('Main capabilities:\n'))
    for c in sorted(caps):
        ui.write(('  %s\n') % c)
    # bundle2 capabilities are nested: one key, possibly several values
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.write(('Bundle2 capabilities:\n'))
        for key, values in sorted(b2caps.iteritems()):
            ui.write(('  %s\n') % key)
            for v in values:
                ui.write(('    %s\n') % v)
398
398
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate

    Cross-checks every dirstate entry against the manifests of the two
    dirstate parents, warning about each inconsistency found, and aborts
    if any were detected.
    """
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        # normal/removed entries must exist in the first parent's manifest
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        # added entries must NOT already exist in the first parent
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        # merged entries must come from at least one parent
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        # every manifest1 file must be tracked as normal/removed/merged
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # Bug fix: the original assigned this message to a local named
        # 'error', shadowing the imported 'error' module, so the following
        # 'raise error.Abort(...)' would fail with AttributeError instead
        # of raising Abort.
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
426
426
@command('debugcolor',
    [('', 'style', None, _('show all configured styles'))],
    'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    # always report the active color mode first, then dispatch on --style
    mode = stringutil.pprint(ui._colormode)
    ui.write(('color mode: %s\n') % mode)
    if opts.get(r'style'):
        return _debugdisplaystyle(ui)
    return _debugdisplaycolor(ui)
437
437
def _debugdisplaycolor(ui):
    """print every available color/effect name, rendered in itself"""
    # work on a copy so the caller's ui styles are untouched
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        # in terminfo mode, user-defined color./terminfo. config entries
        # add extra effects; register them under their short name
        for k, v in ui.configitems('color'):
            if k.startswith('color.'):
                ui._styles[k] = k[6:]
            elif k.startswith('terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_('available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    items = sorted(ui._styles.items(),
                   key=lambda i: ('_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(('%s\n') % colorname, label=label)
455
455
def _debugdisplaystyle(ui):
    """print each configured style label together with its effects

    Every label is written styled as itself; effects are column-aligned
    and each effect name is rendered with its own styling applied.
    """
    ui.write(_('available style:\n'))
    styles = ui._styles
    if not styles:
        return
    width = max(len(name) for name in styles)
    for label, effects in sorted(styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            padding = ' ' * (max(0, width - len(label)))
            rendered = ', '.join(ui.label(e, e) for e in effects.split())
            ui.write(': ')
            ui.write(padding)
            ui.write(rendered)
        ui.write('\n')
469
469
@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        # stream clones copy revlogs wholesale, so secret-phase revisions
        # cannot be filtered out — warn the user
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
487
487
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
     ('b', 'branches', None, _('annotate with branch names')),
     ('', 'dots', None, _('use dots for runs')),
     ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get(r'spaces')
    dots = opts.get(r'dots')
    if file_:
        # standalone revlog index: emit its DAG; any listed revs become
        # "rN" labels in the output
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            # yield ('n', (rev, [parents])) per node, plus 'l' label events
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # no index file: walk the repo changelog, optionally annotating
        # branch switches ('a' events) and tags ('l' events)
        cl = repo.changelog
        tags = opts.get(r'tags')
        branches = opts.get(r'branches')
        if tags:
            # invert the tag map: rev -> list of tag names
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    # emit an annotation only when the branch changes
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    # serialize the event stream into dagparser's concise text form
    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
    ui.write("\n")
550
550
@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    # with -c/-m/--dir the single positional argument is the revision,
    # not a file; shift it over and validate the argument count
    if any(opts.get(k) for k in ('changelog', 'manifest', 'dir')):
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    r = cmdutil.openstorage(repo, 'debugdata', file_, opts)
    try:
        # raw=True: emit the stored revision text without flag processing
        ui.write(r.revision(r.lookup(rev), raw=True))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)
566
566
@command('debugdate',
         [('e', 'extended', None, _('try extended date formats'))],
         _('[-e] DATE [RANGE]'),
         norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # --extended enables the extra, more permissive date formats
    if opts[r"extended"]:
        parsed = dateutil.parsedate(date, util.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    ui.write(("internal: %d %d\n") % parsed)
    ui.write(("standard: %s\n") % dateutil.datestr(parsed))
    if range:
        # report whether the parsed timestamp falls inside RANGE
        matcher = dateutil.matchdate(range)
        ui.write(("match: %s\n") % matcher(parsed[0]))
582
582
@command('debugdeltachain',
         cmdutil.debugrevlogopts + cmdutil.formatteropts,
         _('-c|-m|FILE'),
         optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
                   (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        # classify how 'rev' stores its delta and measure its whole chain
        entry = index[rev]
        compsize = entry[1]
        uncompsize = entry[2]
        base = entry[3]

        if generaldelta:
            # with generaldelta the base may be any revision
            if base == entry[5]:
                deltatype = 'p1'
            elif base == entry[6]:
                deltatype = 'p2'
            elif base == rev - 1:
                deltatype = 'prev'
            elif base == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            # without generaldelta a delta is always against the previous rev
            deltatype = 'base' if base == rev else 'prev'

        chain = r._deltachain(rev)[0]
        # total compressed size of every link in the chain
        chainsize = sum(index[iterrev][1] for iterrev in chain)

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    fm.plain(' rev chain# chainlen prev delta '
             'size rawsize chainsize ratio lindist extradist '
             'extraratio')
    if withsparseread:
        fm.plain(' readsize largestblk rddensity srchunks')
    fm.plain('\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        # number chains sequentially by first-seen base revision
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        # the next-to-last chain member is this revision's delta parent;
        # a single-element chain means this revision is a full snapshot
        prevrev = chain[-2] if len(chain) > 1 else -1

        chainratio = (float(chainsize) / float(uncomp) if uncomp != 0
                      else chainsize)
        extraratio = (float(extradist) / float(chainsize) if chainsize != 0
                      else extradist)

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            # simulate the sparse read: sum the block spans slicechunk
            # would fetch for this chain
            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                largestblock = max(largestblock, blksize)

            readdensity = (float(chainsize) / float(readsize) if readsize
                           else 1)

            fm.write('readsize largestblock readdensity srchunks',
                     ' %10d %10d %9.5f %8d',
                     readsize, largestblock, readdensity, srchunks,
                     readsize=readsize, largestblock=largestblock,
                     readdensity=readdensity, srchunks=srchunks)

        fm.plain('\n')

    fm.end()
734
734
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime (DEPRECATED)')),
     ('', 'dates', True, _('display the saved mtime')),
     ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    # --nodates is the deprecated spelling of --dates=no
    nodates = not opts[r'dates']
    if opts.get(r'nodates') is not None:
        nodates = True
    datesort = opts.get(r'datesort')

    # sort by (mtime, filename) when requested, otherwise by filename only
    keyfunc = (lambda x: (x[1][3], x[0])) if datesort else None
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            timestr = 'unset '
        elif nodates:
            timestr = 'set '
        else:
            timestr = encoding.strtolocal(
                time.strftime(r"%Y-%m-%d %H:%M:%S ",
                              time.localtime(ent[3])))
        if ent[1] & 0o20000:
            # symlink bit set in the recorded mode
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
769
769
@command('debugdiscovery',
         [('', 'old', None, _('use old-style discovery')),
          ('', 'nonheads', None,
           _('use old-style discovery with non-heads included')),
          ('', 'rev', [], 'restrict discovery to this set of revs'),
         ] + cmdutil.remoteopts,
         _('[--rev REV] [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(pushedrevs, remoteheads, remote=remote):
        if opts.get('old'):
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))

            # reduce the common set to its heads for comparable output
            clnode = repo.changelog.node
            common = repo.revs('heads(::%ln)', common)
            common = {clnode(r) for r in common}
        else:
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            # renamed from 'any' to avoid shadowing the builtin; the
            # "anything incoming?" flag is not used by this command
            common, anyincoming, hds = setdiscovery.findcommonheads(
                ui, repo, remote, ancestorsof=nodes)
            common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts['rev']
    doit(localrevs, remoterevs)
822
822
# chunk size used when streaming downloaded data to the destination
_chunksize = 4 << 10

@command('debugdownload',
         [
             ('o', 'output', '', _('path')),
         ],
         optionalrepo=True)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config
    """
    fh = urlmod.open(ui, url, output)

    # write to stdout via ui unless an output path was given
    dest = ui
    if output:
        dest = open(output, "wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        # close the response handle as well; the original leaked it
        # (guarded, as not every handler's handle is documented to
        # expose close() -- TODO confirm against urlmod.open)
        if util.safehasattr(fh, 'close'):
            fh.close()
        if output:
            dest.close()
846
846
@command('debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        # bundled extensions never expose their magic testedwith string
        exttestedwith = ([] if isinternal
                         else getattr(extmod, 'testedwith', '').split())
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                # report the newest version the extension claims to support
                fm.plain(' (%s!)\n' % exttestedwith[-1])

        fm.condwrite(ui.verbose and extsource, 'source',
                     _(' location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal])
            fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _(' tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _(' bug reporting: %s\n'), extbuglink or "")

    fm.end()
892
892
@command('debugfileset',
         [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')),
          ('', 'all-files', False,
           _('test files from all revisions and working directory')),
          ('s', 'show-matcher', None,
           _('print internal representation of matcher')),
          ('p', 'show-stage', [],
           _('print parsed tree at the given stage'), _('NAME'))],
         _('[-r REV] [--all-files] [OPTION]... FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset
    fileset.symbols # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    stages = [
        ('parsed', pycompat.identity),
        ('analyzed', filesetlang.analyze),
        ('optimized', filesetlang.optimize),
    ]
    # set comprehension instead of a redundant generator wrapped in set()
    stagenames = {n for n, f in stages}

    showalways = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    # run the expression through each stage, printing requested trees
    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(filesetlang.prettyformat(tree), "\n")

    # collect the candidate file names to run the matcher over
    files = set()
    if opts['all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts['all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(repo.dirstate.walk(scmutil.matchall(repo),
                                        subrepos=list(wctx.substate),
                                        unknown=True, ignored=True))
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(expr)
    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write("%s\n" % f)
959
959
@command('debugformat',
         [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len('format-variant'), maxvariantlength)

    def makeformatname(name):
        # pad so every variant name column lines up in plain output
        return '%s:' + (' ' * (maxvariantlength - len(name)))

    fm = ui.formatter('debugformat', opts)
    if fm.isplain():
        def formatvalue(value):
            # strings pass through; everything else renders as yes/no
            if util.safehasattr(value, 'startswith'):
                return value
            return 'yes' if value else 'no'
    else:
        formatvalue = pycompat.identity

    fm.plain('format-variant')
    fm.plain(' ' * (maxvariantlength - len('format-variant')))
    fm.plain(' repo')
    if ui.verbose:
        fm.plain(' config default')
    fm.plain('\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # color the row by how repo state compares to config and default
        if repovalue != configvalue:
            namelabel = 'formatvariant.name.mismatchconfig'
            repolabel = 'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = 'formatvariant.name.mismatchdefault'
            repolabel = 'formatvariant.repo.mismatchdefault'
        else:
            namelabel = 'formatvariant.name.uptodate'
            repolabel = 'formatvariant.repo.uptodate'

        fm.write('name', makeformatname(fv.name), fv.name,
                 label=namelabel)
        fm.write('repo', ' %3s', formatvalue(repovalue),
                 label=repolabel)
        configlabel = ('formatvariant.config.special'
                       if fv.default != configvalue
                       else 'formatvariant.config.default')
        fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
                     label=configlabel)
        fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
                     label='formatvariant.default')
        fm.plain('\n')
    fm.end()
1021
1021
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    def yesno(flag):
        # render a boolean probe result exactly as the historical output did
        return 'yes' if flag else 'no'

    ui.write(('path: %s\n') % path)
    ui.write(('mounted on: %s\n') % (util.getfsmountpoint(path) or '(unknown)'))
    ui.write(('exec: %s\n') % yesno(util.checkexec(path)))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % yesno(util.checklink(path)))
    ui.write(('hardlink: %s\n') % yesno(util.checknlink(path)))
    casesensitive = '(unknown)'
    try:
        # probe case sensitivity with a throwaway temp file inside `path`
        with pycompat.namedtempfile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = yesno(util.fscasesensitive(f.name))
    except OSError:
        # path may not be writable; leave the '(unknown)' placeholder
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)
1038
1038
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
     ('C', 'common', [], _('id of common node'), _('ID')),
     ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    args = {}
    if common:
        # the wire protocol wants binary node ids, not hex strings
        args[r'common'] = [bin(s) for s in common]
    if head:
        args[r'heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args[r'bundlecaps'] = None
    bundle = repo.getbundle('debug', **args)

    # map the user-facing compression name to the on-disk bundle format id
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1073
1073
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != '.':
                if ignore(nf):
                    # the file itself matches an ignore rule
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    # otherwise check whether any containing directory is
                    # ignored; the first ignored ancestor wins
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % m.uipath(f))
                else:
                    ui.write(_("%s is ignored because of "
                               "containing folder %s\n")
                             % (m.uipath(f), ignored))
                # report which ignore file and line produced the match
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % m.uipath(f))
1115
1115
@command('debugindex', cmdutil.debugrevlogopts + cmdutil.formatteropts,
         _('-c|-m|FILE'))
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindex', file_, opts)

    # --debug prints full 40-char hashes, otherwise the short form
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # measure one rendered node id (first revision, if any) so the header
    # columns line up with the rows below; default to 12 for empty stores
    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break

    fm = ui.formatter('debugindex', opts)
    fm.plain(b' rev linkrev %s %s p2\n' % (
        b'nodeid'.ljust(idlen),
        b'p1'.ljust(idlen)))

    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', '%7d ', store.linkrev(rev))
        fm.write(b'node', '%s ', shortfn(node))
        fm.write(b'p1', '%s ', shortfn(parents[0]))
        fm.write(b'p2', '%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()
1151
1151
@command('debugindexdot', cmdutil.debugrevlogopts,
         _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    storage = cmdutil.openstorage(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for rev in storage:
        # emit one edge per real parent; p1 always exists, p2 only when
        # the revision is a merge (p2 != nullid)
        p1, p2 = storage.parents(storage.node(rev))
        ui.write("\t%d -> %d\n" % (storage.rev(p1), rev))
        if p2 != nullid:
            ui.write("\t%d -> %d\n" % (storage.rev(p2), rev))
    ui.write("}\n")
1166
1166
@command('debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # touch the index so it is fully parsed before stats are requested
    repo.changelog.shortest(nullid, 1)
    index = repo.changelog.index
    # stats() only exists on the C implementation of the index
    if not util.safehasattr(index, 'stats'):
        raise error.Abort(_('debugindexstats only works with native code'))
    stats = index.stats()
    for name in sorted(stats):
        ui.write('%s: %d\n' % (name, stats[name]))
1176
1176
@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Checks encoding, Python, compiled modules, compression engines,
    templates, the commit editor and the configured username, counting
    detected problems.

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    # NOTE(review): helper not referenced anywhere in this function body;
    # presumably kept for older/removed checks — confirm before removing.
    def writetemp(contents):
        (fd, name) = pycompat.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, r"wb")
        f.write(contents)
        f.close()
        return name

    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable)
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_(' TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_(' SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    if policy.policy in ('c', 'allow'):
        # verify the C extension modules can actually be imported
        err = None
        try:
            from .cext import (
                base85,
                bdiff,
                mpatch,
                osutil,
            )
            dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))
    re2 = 'missing'
    if util._re2:
        re2 = 'available'
    fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                # `p` doubles as the "templates are usable" flag below
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    # 'vi' is the implicit default, so distinguish "never configured"
    # from "configured but not found"
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editorbin)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editorbin)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = stringutil.forcebytestr(e)
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems
1350
1350
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('known'):
        raise error.Abort("known() not supported by target repository")
    # convert each hex id to binary and query them all in one round trip
    flags = peer.known([bin(nodehex) for nodehex in ids])
    rendered = "".join("1" if known else "0" for known in flags)
    ui.write("%s\n" % rendered)
1364
1364
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # kept only as an alias for the old command name; delegates unchanged
    debugnamecomplete(ui, repo, *args)
1369
1369
@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)')),
          ('s', 'set-lock', None, _('set the store lock until stopped')),
          ('S', 'set-wlock', None,
           _('set the working state lock until stopped'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # forced removal: delete the lock file(s) directly and stop
    if opts.get(r'force_lock'):
        repo.svfs.unlink('lock')
    if opts.get(r'force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
        return 0

    locks = []
    try:
        # non-blocking acquire (wait=False) so an already-held lock
        # aborts instead of hanging
        if opts.get(r'set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_('wlock is already held'))
        if opts.get(r'set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_('lock is already held'))
        if len(locks):
            # hold the lock(s) until the user answers (or interrupts)
            ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        # always release whatever was acquired, even on abort/interrupt
        release(*locks)

    # no modification options given: report the current lock state
    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # we acquired it, so it was free; release immediately
            l.release()
        else:
            # somebody else holds it: read age and owner from the lock file
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    # lock contents are "host:pid"
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = 'user %s, process %s, host %s' \
                                 % (user or b'None', pid, host)
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                # ENOENT means the lock vanished between the probe and the
                # lstat: treat it as free; anything else is a real error
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held
1466
1466
1467 @command('debugmanifestfulltextcache', [
1467 @command('debugmanifestfulltextcache', [
1468 ('', 'clear', False, _('clear the cache')),
1468 ('', 'clear', False, _('clear the cache')),
1469 ('a', 'add', '', _('add the given manifest node to the cache'),
1469 ('a', 'add', '', _('add the given manifest node to the cache'),
1470 _('NODE'))
1470 _('NODE'))
1471 ], '')
1471 ], '')
1472 def debugmanifestfulltextcache(ui, repo, add=None, **opts):
1472 def debugmanifestfulltextcache(ui, repo, add=None, **opts):
1473 """show, clear or amend the contents of the manifest fulltext cache"""
1473 """show, clear or amend the contents of the manifest fulltext cache"""
1474 with repo.lock():
1474 with repo.lock():
1475 r = repo.manifestlog.getstorage(b'')
1475 r = repo.manifestlog.getstorage(b'')
1476 try:
1476 try:
1477 cache = r._fulltextcache
1477 cache = r._fulltextcache
1478 except AttributeError:
1478 except AttributeError:
1479 ui.warn(_(
1479 ui.warn(_(
1480 "Current revlog implementation doesn't appear to have a "
1480 "Current revlog implementation doesn't appear to have a "
1481 'manifest fulltext cache\n'))
1481 'manifest fulltext cache\n'))
1482 return
1482 return
1483
1483
1484 if opts.get(r'clear'):
1484 if opts.get(r'clear'):
1485 cache.clear()
1485 cache.clear()
1486
1486
1487 if add:
1487 if add:
1488 try:
1488 try:
1489 manifest = repo.manifestlog[r.lookup(add)]
1489 manifest = repo.manifestlog[r.lookup(add)]
1490 except error.LookupError as e:
1490 except error.LookupError as e:
1491 raise error.Abort(e, hint="Check your manifest node id")
1491 raise error.Abort(e, hint="Check your manifest node id")
1492 manifest.read() # stores revisision in cache too
1492 manifest.read() # stores revisision in cache too
1493
1493
1494 if not len(cache):
1494 if not len(cache):
1495 ui.write(_('Cache empty'))
1495 ui.write(_('Cache empty'))
1496 else:
1496 else:
1497 ui.write(
1497 ui.write(
1498 _('Cache contains %d manifest entries, in order of most to '
1498 _('Cache contains %d manifest entries, in order of most to '
1499 'least recent:\n') % (len(cache),))
1499 'least recent:\n') % (len(cache),))
1500 totalsize = 0
1500 totalsize = 0
1501 for nodeid in cache:
1501 for nodeid in cache:
1502 # Use cache.get to not update the LRU order
1502 # Use cache.get to not update the LRU order
1503 data = cache.get(nodeid)
1503 data = cache.get(nodeid)
1504 size = len(data)
1504 size = len(data)
1505 totalsize += size + 24 # 20 bytes nodeid, 4 bytes size
1505 totalsize += size + 24 # 20 bytes nodeid, 4 bytes size
1506 ui.write(_('id: %s, size %s\n') % (
1506 ui.write(_('id: %s, size %s\n') % (
1507 hex(nodeid), util.bytecount(size)))
1507 hex(nodeid), util.bytecount(size)))
1508 ondisk = cache._opener.stat('manifestfulltextcache').st_size
1508 ondisk = cache._opener.stat('manifestfulltextcache').st_size
1509 ui.write(
1509 ui.write(
1510 _('Total cache data size %s, on-disk %s\n') % (
1510 _('Total cache data size %s, on-disk %s\n') % (
1511 util.bytecount(totalsize), util.bytecount(ondisk))
1511 util.bytecount(totalsize), util.bytecount(ondisk))
1512 )
1512 )
1513
1513
1514 @command('debugmergestate', [], '')
1514 @command('debugmergestate', [], '')
1515 def debugmergestate(ui, repo, *args):
1515 def debugmergestate(ui, repo, *args):
1516 """print merge state
1516 """print merge state
1517
1517
1518 Use --verbose to print out information about whether v1 or v2 merge state
1518 Use --verbose to print out information about whether v1 or v2 merge state
1519 was chosen."""
1519 was chosen."""
1520 def _hashornull(h):
1520 def _hashornull(h):
1521 if h == nullhex:
1521 if h == nullhex:
1522 return 'null'
1522 return 'null'
1523 else:
1523 else:
1524 return h
1524 return h
1525
1525
1526 def printrecords(version):
1526 def printrecords(version):
1527 ui.write(('* version %d records\n') % version)
1527 ui.write(('* version %d records\n') % version)
1528 if version == 1:
1528 if version == 1:
1529 records = v1records
1529 records = v1records
1530 else:
1530 else:
1531 records = v2records
1531 records = v2records
1532
1532
1533 for rtype, record in records:
1533 for rtype, record in records:
1534 # pretty print some record types
1534 # pretty print some record types
1535 if rtype == 'L':
1535 if rtype == 'L':
1536 ui.write(('local: %s\n') % record)
1536 ui.write(('local: %s\n') % record)
1537 elif rtype == 'O':
1537 elif rtype == 'O':
1538 ui.write(('other: %s\n') % record)
1538 ui.write(('other: %s\n') % record)
1539 elif rtype == 'm':
1539 elif rtype == 'm':
1540 driver, mdstate = record.split('\0', 1)
1540 driver, mdstate = record.split('\0', 1)
1541 ui.write(('merge driver: %s (state "%s")\n')
1541 ui.write(('merge driver: %s (state "%s")\n')
1542 % (driver, mdstate))
1542 % (driver, mdstate))
1543 elif rtype in 'FDC':
1543 elif rtype in 'FDC':
1544 r = record.split('\0')
1544 r = record.split('\0')
1545 f, state, hash, lfile, afile, anode, ofile = r[0:7]
1545 f, state, hash, lfile, afile, anode, ofile = r[0:7]
1546 if version == 1:
1546 if version == 1:
1547 onode = 'not stored in v1 format'
1547 onode = 'not stored in v1 format'
1548 flags = r[7]
1548 flags = r[7]
1549 else:
1549 else:
1550 onode, flags = r[7:9]
1550 onode, flags = r[7:9]
1551 ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
1551 ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
1552 % (f, rtype, state, _hashornull(hash)))
1552 % (f, rtype, state, _hashornull(hash)))
1553 ui.write((' local path: %s (flags "%s")\n') % (lfile, flags))
1553 ui.write((' local path: %s (flags "%s")\n') % (lfile, flags))
1554 ui.write((' ancestor path: %s (node %s)\n')
1554 ui.write((' ancestor path: %s (node %s)\n')
1555 % (afile, _hashornull(anode)))
1555 % (afile, _hashornull(anode)))
1556 ui.write((' other path: %s (node %s)\n')
1556 ui.write((' other path: %s (node %s)\n')
1557 % (ofile, _hashornull(onode)))
1557 % (ofile, _hashornull(onode)))
1558 elif rtype == 'f':
1558 elif rtype == 'f':
1559 filename, rawextras = record.split('\0', 1)
1559 filename, rawextras = record.split('\0', 1)
1560 extras = rawextras.split('\0')
1560 extras = rawextras.split('\0')
1561 i = 0
1561 i = 0
1562 extrastrings = []
1562 extrastrings = []
1563 while i < len(extras):
1563 while i < len(extras):
1564 extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
1564 extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
1565 i += 2
1565 i += 2
1566
1566
1567 ui.write(('file extras: %s (%s)\n')
1567 ui.write(('file extras: %s (%s)\n')
1568 % (filename, ', '.join(extrastrings)))
1568 % (filename, ', '.join(extrastrings)))
1569 elif rtype == 'l':
1569 elif rtype == 'l':
1570 labels = record.split('\0', 2)
1570 labels = record.split('\0', 2)
1571 labels = [l for l in labels if len(l) > 0]
1571 labels = [l for l in labels if len(l) > 0]
1572 ui.write(('labels:\n'))
1572 ui.write(('labels:\n'))
1573 ui.write((' local: %s\n' % labels[0]))
1573 ui.write((' local: %s\n' % labels[0]))
1574 ui.write((' other: %s\n' % labels[1]))
1574 ui.write((' other: %s\n' % labels[1]))
1575 if len(labels) > 2:
1575 if len(labels) > 2:
1576 ui.write((' base: %s\n' % labels[2]))
1576 ui.write((' base: %s\n' % labels[2]))
1577 else:
1577 else:
1578 ui.write(('unrecognized entry: %s\t%s\n')
1578 ui.write(('unrecognized entry: %s\t%s\n')
1579 % (rtype, record.replace('\0', '\t')))
1579 % (rtype, record.replace('\0', '\t')))
1580
1580
1581 # Avoid mergestate.read() since it may raise an exception for unsupported
1581 # Avoid mergestate.read() since it may raise an exception for unsupported
1582 # merge state records. We shouldn't be doing this, but this is OK since this
1582 # merge state records. We shouldn't be doing this, but this is OK since this
1583 # command is pretty low-level.
1583 # command is pretty low-level.
1584 ms = mergemod.mergestate(repo)
1584 ms = mergemod.mergestate(repo)
1585
1585
1586 # sort so that reasonable information is on top
1586 # sort so that reasonable information is on top
1587 v1records = ms._readrecordsv1()
1587 v1records = ms._readrecordsv1()
1588 v2records = ms._readrecordsv2()
1588 v2records = ms._readrecordsv2()
1589 order = 'LOml'
1589 order = 'LOml'
1590 def key(r):
1590 def key(r):
1591 idx = order.find(r[0])
1591 idx = order.find(r[0])
1592 if idx == -1:
1592 if idx == -1:
1593 return (1, r[1])
1593 return (1, r[1])
1594 else:
1594 else:
1595 return (0, idx)
1595 return (0, idx)
1596 v1records.sort(key=key)
1596 v1records.sort(key=key)
1597 v2records.sort(key=key)
1597 v2records.sort(key=key)
1598
1598
1599 if not v1records and not v2records:
1599 if not v1records and not v2records:
1600 ui.write(('no merge state found\n'))
1600 ui.write(('no merge state found\n'))
1601 elif not v2records:
1601 elif not v2records:
1602 ui.note(('no version 2 merge state\n'))
1602 ui.note(('no version 2 merge state\n'))
1603 printrecords(1)
1603 printrecords(1)
1604 elif ms._v1v2match(v1records, v2records):
1604 elif ms._v1v2match(v1records, v2records):
1605 ui.note(('v1 and v2 states match: using v2\n'))
1605 ui.note(('v1 and v2 states match: using v2\n'))
1606 printrecords(2)
1606 printrecords(2)
1607 else:
1607 else:
1608 ui.note(('v1 and v2 states mismatch: using v1\n'))
1608 ui.note(('v1 and v2 states mismatch: using v1\n'))
1609 printrecords(1)
1609 printrecords(1)
1610 if ui.verbose:
1610 if ui.verbose:
1611 printrecords(2)
1611 printrecords(2)
1612
1612
1613 @command('debugnamecomplete', [], _('NAME...'))
1613 @command('debugnamecomplete', [], _('NAME...'))
1614 def debugnamecomplete(ui, repo, *args):
1614 def debugnamecomplete(ui, repo, *args):
1615 '''complete "names" - tags, open branch names, bookmark names'''
1615 '''complete "names" - tags, open branch names, bookmark names'''
1616
1616
1617 names = set()
1617 names = set()
1618 # since we previously only listed open branches, we will handle that
1618 # since we previously only listed open branches, we will handle that
1619 # specially (after this for loop)
1619 # specially (after this for loop)
1620 for name, ns in repo.names.iteritems():
1620 for name, ns in repo.names.iteritems():
1621 if name != 'branches':
1621 if name != 'branches':
1622 names.update(ns.listnames(repo))
1622 names.update(ns.listnames(repo))
1623 names.update(tag for (tag, heads, tip, closed)
1623 names.update(tag for (tag, heads, tip, closed)
1624 in repo.branchmap().iterbranches() if not closed)
1624 in repo.branchmap().iterbranches() if not closed)
1625 completions = set()
1625 completions = set()
1626 if not args:
1626 if not args:
1627 args = ['']
1627 args = ['']
1628 for a in args:
1628 for a in args:
1629 completions.update(n for n in names if n.startswith(a))
1629 completions.update(n for n in names if n.startswith(a))
1630 ui.write('\n'.join(sorted(completions)))
1630 ui.write('\n'.join(sorted(completions)))
1631 ui.write('\n')
1631 ui.write('\n')
1632
1632
1633 @command('debugobsolete',
1633 @command('debugobsolete',
1634 [('', 'flags', 0, _('markers flag')),
1634 [('', 'flags', 0, _('markers flag')),
1635 ('', 'record-parents', False,
1635 ('', 'record-parents', False,
1636 _('record parent information for the precursor')),
1636 _('record parent information for the precursor')),
1637 ('r', 'rev', [], _('display markers relevant to REV')),
1637 ('r', 'rev', [], _('display markers relevant to REV')),
1638 ('', 'exclusive', False, _('restrict display to markers only '
1638 ('', 'exclusive', False, _('restrict display to markers only '
1639 'relevant to REV')),
1639 'relevant to REV')),
1640 ('', 'index', False, _('display index of the marker')),
1640 ('', 'index', False, _('display index of the marker')),
1641 ('', 'delete', [], _('delete markers specified by indices')),
1641 ('', 'delete', [], _('delete markers specified by indices')),
1642 ] + cmdutil.commitopts2 + cmdutil.formatteropts,
1642 ] + cmdutil.commitopts2 + cmdutil.formatteropts,
1643 _('[OBSOLETED [REPLACEMENT ...]]'))
1643 _('[OBSOLETED [REPLACEMENT ...]]'))
1644 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
1644 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
1645 """create arbitrary obsolete marker
1645 """create arbitrary obsolete marker
1646
1646
1647 With no arguments, displays the list of obsolescence markers."""
1647 With no arguments, displays the list of obsolescence markers."""
1648
1648
1649 opts = pycompat.byteskwargs(opts)
1649 opts = pycompat.byteskwargs(opts)
1650
1650
1651 def parsenodeid(s):
1651 def parsenodeid(s):
1652 try:
1652 try:
1653 # We do not use revsingle/revrange functions here to accept
1653 # We do not use revsingle/revrange functions here to accept
1654 # arbitrary node identifiers, possibly not present in the
1654 # arbitrary node identifiers, possibly not present in the
1655 # local repository.
1655 # local repository.
1656 n = bin(s)
1656 n = bin(s)
1657 if len(n) != len(nullid):
1657 if len(n) != len(nullid):
1658 raise TypeError()
1658 raise TypeError()
1659 return n
1659 return n
1660 except TypeError:
1660 except TypeError:
1661 raise error.Abort('changeset references must be full hexadecimal '
1661 raise error.Abort('changeset references must be full hexadecimal '
1662 'node identifiers')
1662 'node identifiers')
1663
1663
1664 if opts.get('delete'):
1664 if opts.get('delete'):
1665 indices = []
1665 indices = []
1666 for v in opts.get('delete'):
1666 for v in opts.get('delete'):
1667 try:
1667 try:
1668 indices.append(int(v))
1668 indices.append(int(v))
1669 except ValueError:
1669 except ValueError:
1670 raise error.Abort(_('invalid index value: %r') % v,
1670 raise error.Abort(_('invalid index value: %r') % v,
1671 hint=_('use integers for indices'))
1671 hint=_('use integers for indices'))
1672
1672
1673 if repo.currenttransaction():
1673 if repo.currenttransaction():
1674 raise error.Abort(_('cannot delete obsmarkers in the middle '
1674 raise error.Abort(_('cannot delete obsmarkers in the middle '
1675 'of transaction.'))
1675 'of transaction.'))
1676
1676
1677 with repo.lock():
1677 with repo.lock():
1678 n = repair.deleteobsmarkers(repo.obsstore, indices)
1678 n = repair.deleteobsmarkers(repo.obsstore, indices)
1679 ui.write(_('deleted %i obsolescence markers\n') % n)
1679 ui.write(_('deleted %i obsolescence markers\n') % n)
1680
1680
1681 return
1681 return
1682
1682
1683 if precursor is not None:
1683 if precursor is not None:
1684 if opts['rev']:
1684 if opts['rev']:
1685 raise error.Abort('cannot select revision when creating marker')
1685 raise error.Abort('cannot select revision when creating marker')
1686 metadata = {}
1686 metadata = {}
1687 metadata['user'] = encoding.fromlocal(opts['user'] or ui.username())
1687 metadata['user'] = encoding.fromlocal(opts['user'] or ui.username())
1688 succs = tuple(parsenodeid(succ) for succ in successors)
1688 succs = tuple(parsenodeid(succ) for succ in successors)
1689 l = repo.lock()
1689 l = repo.lock()
1690 try:
1690 try:
1691 tr = repo.transaction('debugobsolete')
1691 tr = repo.transaction('debugobsolete')
1692 try:
1692 try:
1693 date = opts.get('date')
1693 date = opts.get('date')
1694 if date:
1694 if date:
1695 date = dateutil.parsedate(date)
1695 date = dateutil.parsedate(date)
1696 else:
1696 else:
1697 date = None
1697 date = None
1698 prec = parsenodeid(precursor)
1698 prec = parsenodeid(precursor)
1699 parents = None
1699 parents = None
1700 if opts['record_parents']:
1700 if opts['record_parents']:
1701 if prec not in repo.unfiltered():
1701 if prec not in repo.unfiltered():
1702 raise error.Abort('cannot used --record-parents on '
1702 raise error.Abort('cannot used --record-parents on '
1703 'unknown changesets')
1703 'unknown changesets')
1704 parents = repo.unfiltered()[prec].parents()
1704 parents = repo.unfiltered()[prec].parents()
1705 parents = tuple(p.node() for p in parents)
1705 parents = tuple(p.node() for p in parents)
1706 repo.obsstore.create(tr, prec, succs, opts['flags'],
1706 repo.obsstore.create(tr, prec, succs, opts['flags'],
1707 parents=parents, date=date,
1707 parents=parents, date=date,
1708 metadata=metadata, ui=ui)
1708 metadata=metadata, ui=ui)
1709 tr.close()
1709 tr.close()
1710 except ValueError as exc:
1710 except ValueError as exc:
1711 raise error.Abort(_('bad obsmarker input: %s') %
1711 raise error.Abort(_('bad obsmarker input: %s') %
1712 pycompat.bytestr(exc))
1712 pycompat.bytestr(exc))
1713 finally:
1713 finally:
1714 tr.release()
1714 tr.release()
1715 finally:
1715 finally:
1716 l.release()
1716 l.release()
1717 else:
1717 else:
1718 if opts['rev']:
1718 if opts['rev']:
1719 revs = scmutil.revrange(repo, opts['rev'])
1719 revs = scmutil.revrange(repo, opts['rev'])
1720 nodes = [repo[r].node() for r in revs]
1720 nodes = [repo[r].node() for r in revs]
1721 markers = list(obsutil.getmarkers(repo, nodes=nodes,
1721 markers = list(obsutil.getmarkers(repo, nodes=nodes,
1722 exclusive=opts['exclusive']))
1722 exclusive=opts['exclusive']))
1723 markers.sort(key=lambda x: x._data)
1723 markers.sort(key=lambda x: x._data)
1724 else:
1724 else:
1725 markers = obsutil.getmarkers(repo)
1725 markers = obsutil.getmarkers(repo)
1726
1726
1727 markerstoiter = markers
1727 markerstoiter = markers
1728 isrelevant = lambda m: True
1728 isrelevant = lambda m: True
1729 if opts.get('rev') and opts.get('index'):
1729 if opts.get('rev') and opts.get('index'):
1730 markerstoiter = obsutil.getmarkers(repo)
1730 markerstoiter = obsutil.getmarkers(repo)
1731 markerset = set(markers)
1731 markerset = set(markers)
1732 isrelevant = lambda m: m in markerset
1732 isrelevant = lambda m: m in markerset
1733
1733
1734 fm = ui.formatter('debugobsolete', opts)
1734 fm = ui.formatter('debugobsolete', opts)
1735 for i, m in enumerate(markerstoiter):
1735 for i, m in enumerate(markerstoiter):
1736 if not isrelevant(m):
1736 if not isrelevant(m):
1737 # marker can be irrelevant when we're iterating over a set
1737 # marker can be irrelevant when we're iterating over a set
1738 # of markers (markerstoiter) which is bigger than the set
1738 # of markers (markerstoiter) which is bigger than the set
1739 # of markers we want to display (markers)
1739 # of markers we want to display (markers)
1740 # this can happen if both --index and --rev options are
1740 # this can happen if both --index and --rev options are
1741 # provided and thus we need to iterate over all of the markers
1741 # provided and thus we need to iterate over all of the markers
1742 # to get the correct indices, but only display the ones that
1742 # to get the correct indices, but only display the ones that
1743 # are relevant to --rev value
1743 # are relevant to --rev value
1744 continue
1744 continue
1745 fm.startitem()
1745 fm.startitem()
1746 ind = i if opts.get('index') else None
1746 ind = i if opts.get('index') else None
1747 cmdutil.showmarker(fm, m, index=ind)
1747 cmdutil.showmarker(fm, m, index=ind)
1748 fm.end()
1748 fm.end()
1749
1749
1750 @command('debugpathcomplete',
1750 @command('debugpathcomplete',
1751 [('f', 'full', None, _('complete an entire path')),
1751 [('f', 'full', None, _('complete an entire path')),
1752 ('n', 'normal', None, _('show only normal files')),
1752 ('n', 'normal', None, _('show only normal files')),
1753 ('a', 'added', None, _('show only added files')),
1753 ('a', 'added', None, _('show only added files')),
1754 ('r', 'removed', None, _('show only removed files'))],
1754 ('r', 'removed', None, _('show only removed files'))],
1755 _('FILESPEC...'))
1755 _('FILESPEC...'))
1756 def debugpathcomplete(ui, repo, *specs, **opts):
1756 def debugpathcomplete(ui, repo, *specs, **opts):
1757 '''complete part or all of a tracked path
1757 '''complete part or all of a tracked path
1758
1758
1759 This command supports shells that offer path name completion. It
1759 This command supports shells that offer path name completion. It
1760 currently completes only files already known to the dirstate.
1760 currently completes only files already known to the dirstate.
1761
1761
1762 Completion extends only to the next path segment unless
1762 Completion extends only to the next path segment unless
1763 --full is specified, in which case entire paths are used.'''
1763 --full is specified, in which case entire paths are used.'''
1764
1764
1765 def complete(path, acceptable):
1765 def complete(path, acceptable):
1766 dirstate = repo.dirstate
1766 dirstate = repo.dirstate
1767 spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
1767 spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
1768 rootdir = repo.root + pycompat.ossep
1768 rootdir = repo.root + pycompat.ossep
1769 if spec != repo.root and not spec.startswith(rootdir):
1769 if spec != repo.root and not spec.startswith(rootdir):
1770 return [], []
1770 return [], []
1771 if os.path.isdir(spec):
1771 if os.path.isdir(spec):
1772 spec += '/'
1772 spec += '/'
1773 spec = spec[len(rootdir):]
1773 spec = spec[len(rootdir):]
1774 fixpaths = pycompat.ossep != '/'
1774 fixpaths = pycompat.ossep != '/'
1775 if fixpaths:
1775 if fixpaths:
1776 spec = spec.replace(pycompat.ossep, '/')
1776 spec = spec.replace(pycompat.ossep, '/')
1777 speclen = len(spec)
1777 speclen = len(spec)
1778 fullpaths = opts[r'full']
1778 fullpaths = opts[r'full']
1779 files, dirs = set(), set()
1779 files, dirs = set(), set()
1780 adddir, addfile = dirs.add, files.add
1780 adddir, addfile = dirs.add, files.add
1781 for f, st in dirstate.iteritems():
1781 for f, st in dirstate.iteritems():
1782 if f.startswith(spec) and st[0] in acceptable:
1782 if f.startswith(spec) and st[0] in acceptable:
1783 if fixpaths:
1783 if fixpaths:
1784 f = f.replace('/', pycompat.ossep)
1784 f = f.replace('/', pycompat.ossep)
1785 if fullpaths:
1785 if fullpaths:
1786 addfile(f)
1786 addfile(f)
1787 continue
1787 continue
1788 s = f.find(pycompat.ossep, speclen)
1788 s = f.find(pycompat.ossep, speclen)
1789 if s >= 0:
1789 if s >= 0:
1790 adddir(f[:s])
1790 adddir(f[:s])
1791 else:
1791 else:
1792 addfile(f)
1792 addfile(f)
1793 return files, dirs
1793 return files, dirs
1794
1794
1795 acceptable = ''
1795 acceptable = ''
1796 if opts[r'normal']:
1796 if opts[r'normal']:
1797 acceptable += 'nm'
1797 acceptable += 'nm'
1798 if opts[r'added']:
1798 if opts[r'added']:
1799 acceptable += 'a'
1799 acceptable += 'a'
1800 if opts[r'removed']:
1800 if opts[r'removed']:
1801 acceptable += 'r'
1801 acceptable += 'r'
1802 cwd = repo.getcwd()
1802 cwd = repo.getcwd()
1803 if not specs:
1803 if not specs:
1804 specs = ['.']
1804 specs = ['.']
1805
1805
1806 files, dirs = set(), set()
1806 files, dirs = set(), set()
1807 for spec in specs:
1807 for spec in specs:
1808 f, d = complete(spec, acceptable or 'nmar')
1808 f, d = complete(spec, acceptable or 'nmar')
1809 files.update(f)
1809 files.update(f)
1810 dirs.update(d)
1810 dirs.update(d)
1811 files.update(dirs)
1811 files.update(dirs)
1812 ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
1812 ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
1813 ui.write('\n')
1813 ui.write('\n')
1814
1814
1815 @command('debugpeer', [], _('PATH'), norepo=True)
1815 @command('debugpeer', [], _('PATH'), norepo=True)
1816 def debugpeer(ui, path):
1816 def debugpeer(ui, path):
1817 """establish a connection to a peer repository"""
1817 """establish a connection to a peer repository"""
1818 # Always enable peer request logging. Requires --debug to display
1818 # Always enable peer request logging. Requires --debug to display
1819 # though.
1819 # though.
1820 overrides = {
1820 overrides = {
1821 ('devel', 'debug.peer-request'): True,
1821 ('devel', 'debug.peer-request'): True,
1822 }
1822 }
1823
1823
1824 with ui.configoverride(overrides):
1824 with ui.configoverride(overrides):
1825 peer = hg.peer(ui, {}, path)
1825 peer = hg.peer(ui, {}, path)
1826
1826
1827 local = peer.local() is not None
1827 local = peer.local() is not None
1828 canpush = peer.canpush()
1828 canpush = peer.canpush()
1829
1829
1830 ui.write(_('url: %s\n') % peer.url())
1830 ui.write(_('url: %s\n') % peer.url())
1831 ui.write(_('local: %s\n') % (_('yes') if local else _('no')))
1831 ui.write(_('local: %s\n') % (_('yes') if local else _('no')))
1832 ui.write(_('pushable: %s\n') % (_('yes') if canpush else _('no')))
1832 ui.write(_('pushable: %s\n') % (_('yes') if canpush else _('no')))
1833
1833
1834 @command('debugpickmergetool',
1834 @command('debugpickmergetool',
1835 [('r', 'rev', '', _('check for files in this revision'), _('REV')),
1835 [('r', 'rev', '', _('check for files in this revision'), _('REV')),
1836 ('', 'changedelete', None, _('emulate merging change and delete')),
1836 ('', 'changedelete', None, _('emulate merging change and delete')),
1837 ] + cmdutil.walkopts + cmdutil.mergetoolopts,
1837 ] + cmdutil.walkopts + cmdutil.mergetoolopts,
1838 _('[PATTERN]...'),
1838 _('[PATTERN]...'),
1839 inferrepo=True)
1839 inferrepo=True)
1840 def debugpickmergetool(ui, repo, *pats, **opts):
1840 def debugpickmergetool(ui, repo, *pats, **opts):
1841 """examine which merge tool is chosen for specified file
1841 """examine which merge tool is chosen for specified file
1842
1842
1843 As described in :hg:`help merge-tools`, Mercurial examines
1843 As described in :hg:`help merge-tools`, Mercurial examines
1844 configurations below in this order to decide which merge tool is
1844 configurations below in this order to decide which merge tool is
1845 chosen for specified file.
1845 chosen for specified file.
1846
1846
1847 1. ``--tool`` option
1847 1. ``--tool`` option
1848 2. ``HGMERGE`` environment variable
1848 2. ``HGMERGE`` environment variable
1849 3. configurations in ``merge-patterns`` section
1849 3. configurations in ``merge-patterns`` section
1850 4. configuration of ``ui.merge``
1850 4. configuration of ``ui.merge``
1851 5. configurations in ``merge-tools`` section
1851 5. configurations in ``merge-tools`` section
1852 6. ``hgmerge`` tool (for historical reason only)
1852 6. ``hgmerge`` tool (for historical reason only)
1853 7. default tool for fallback (``:merge`` or ``:prompt``)
1853 7. default tool for fallback (``:merge`` or ``:prompt``)
1854
1854
1855 This command writes out examination result in the style below::
1855 This command writes out examination result in the style below::
1856
1856
1857 FILE = MERGETOOL
1857 FILE = MERGETOOL
1858
1858
1859 By default, all files known in the first parent context of the
1859 By default, all files known in the first parent context of the
1860 working directory are examined. Use file patterns and/or -I/-X
1860 working directory are examined. Use file patterns and/or -I/-X
1861 options to limit target files. -r/--rev is also useful to examine
1861 options to limit target files. -r/--rev is also useful to examine
1862 files in another context without actual updating to it.
1862 files in another context without actual updating to it.
1863
1863
1864 With --debug, this command shows warning messages while matching
1864 With --debug, this command shows warning messages while matching
1865 against ``merge-patterns`` and so on, too. It is recommended to
1865 against ``merge-patterns`` and so on, too. It is recommended to
1866 use this option with explicit file patterns and/or -I/-X options,
1866 use this option with explicit file patterns and/or -I/-X options,
1867 because this option increases amount of output per file according
1867 because this option increases amount of output per file according
1868 to configurations in hgrc.
1868 to configurations in hgrc.
1869
1869
1870 With -v/--verbose, this command shows configurations below at
1870 With -v/--verbose, this command shows configurations below at
1871 first (only if specified).
1871 first (only if specified).
1872
1872
1873 - ``--tool`` option
1873 - ``--tool`` option
1874 - ``HGMERGE`` environment variable
1874 - ``HGMERGE`` environment variable
1875 - configuration of ``ui.merge``
1875 - configuration of ``ui.merge``
1876
1876
1877 If merge tool is chosen before matching against
1877 If merge tool is chosen before matching against
1878 ``merge-patterns``, this command can't show any helpful
1878 ``merge-patterns``, this command can't show any helpful
1879 information, even with --debug. In such case, information above is
1879 information, even with --debug. In such case, information above is
1880 useful to know why a merge tool is chosen.
1880 useful to know why a merge tool is chosen.
1881 """
1881 """
1882 opts = pycompat.byteskwargs(opts)
1882 opts = pycompat.byteskwargs(opts)
1883 overrides = {}
1883 overrides = {}
1884 if opts['tool']:
1884 if opts['tool']:
1885 overrides[('ui', 'forcemerge')] = opts['tool']
1885 overrides[('ui', 'forcemerge')] = opts['tool']
1886 ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))
1886 ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))
1887
1887
1888 with ui.configoverride(overrides, 'debugmergepatterns'):
1888 with ui.configoverride(overrides, 'debugmergepatterns'):
1889 hgmerge = encoding.environ.get("HGMERGE")
1889 hgmerge = encoding.environ.get("HGMERGE")
1890 if hgmerge is not None:
1890 if hgmerge is not None:
1891 ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
1891 ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
1892 uimerge = ui.config("ui", "merge")
1892 uimerge = ui.config("ui", "merge")
1893 if uimerge:
1893 if uimerge:
1894 ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))
1894 ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))
1895
1895
1896 ctx = scmutil.revsingle(repo, opts.get('rev'))
1896 ctx = scmutil.revsingle(repo, opts.get('rev'))
1897 m = scmutil.match(ctx, pats, opts)
1897 m = scmutil.match(ctx, pats, opts)
1898 changedelete = opts['changedelete']
1898 changedelete = opts['changedelete']
1899 for path in ctx.walk(m):
1899 for path in ctx.walk(m):
1900 fctx = ctx[path]
1900 fctx = ctx[path]
1901 try:
1901 try:
1902 if not ui.debugflag:
1902 if not ui.debugflag:
1903 ui.pushbuffer(error=True)
1903 ui.pushbuffer(error=True)
1904 tool, toolpath = filemerge._picktool(repo, ui, path,
1904 tool, toolpath = filemerge._picktool(repo, ui, path,
1905 fctx.isbinary(),
1905 fctx.isbinary(),
1906 'l' in fctx.flags(),
1906 'l' in fctx.flags(),
1907 changedelete)
1907 changedelete)
1908 finally:
1908 finally:
1909 if not ui.debugflag:
1909 if not ui.debugflag:
1910 ui.popbuffer()
1910 ui.popbuffer()
1911 ui.write(('%s = %s\n') % (path, tool))
1911 ui.write(('%s = %s\n') % (path, tool))
1912
1912
1913 @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
1913 @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
1914 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
1914 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
1915 '''access the pushkey key/value protocol
1915 '''access the pushkey key/value protocol
1916
1916
1917 With two args, list the keys in the given namespace.
1917 With two args, list the keys in the given namespace.
1918
1918
1919 With five args, set a key to new if it currently is set to old.
1919 With five args, set a key to new if it currently is set to old.
1920 Reports success or failure.
1920 Reports success or failure.
1921 '''
1921 '''
1922
1922
1923 target = hg.peer(ui, {}, repopath)
1923 target = hg.peer(ui, {}, repopath)
1924 if keyinfo:
1924 if keyinfo:
1925 key, old, new = keyinfo
1925 key, old, new = keyinfo
1926 with target.commandexecutor() as e:
1926 with target.commandexecutor() as e:
1927 r = e.callcommand('pushkey', {
1927 r = e.callcommand('pushkey', {
1928 'namespace': namespace,
1928 'namespace': namespace,
1929 'key': key,
1929 'key': key,
1930 'old': old,
1930 'old': old,
1931 'new': new,
1931 'new': new,
1932 }).result()
1932 }).result()
1933
1933
1934 ui.status(pycompat.bytestr(r) + '\n')
1934 ui.status(pycompat.bytestr(r) + '\n')
1935 return not r
1935 return not r
1936 else:
1936 else:
1937 for k, v in sorted(target.listkeys(namespace).iteritems()):
1937 for k, v in sorted(target.listkeys(namespace).iteritems()):
1938 ui.write("%s\t%s\n" % (stringutil.escapestr(k),
1938 ui.write("%s\t%s\n" % (stringutil.escapestr(k),
1939 stringutil.escapestr(v)))
1939 stringutil.escapestr(v)))
1940
1940
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    # Resolve both arguments to changectxs and build their parent-vectors.
    ctxa = scmutil.revsingle(repo, a)
    ctxb = scmutil.revsingle(repo, b)
    veca = pvec.ctxpvec(ctxa)
    vecb = pvec.ctxpvec(ctxb)
    # Classify the topological relation between the two vectors:
    # equal, ancestor/descendant, or crossed branches.
    if veca == vecb:
        rel = "="
    elif veca > vecb:
        rel = ">"
    elif veca < vecb:
        rel = "<"
    elif veca | vecb:
        rel = "|"
    ui.write(_("a: %s\n") % veca)
    ui.write(_("b: %s\n") % vecb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (veca._depth, vecb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(veca._depth - vecb._depth),
              pvec._hamming(veca._vec, vecb._vec),
              veca.distance(vecb), rel))
1961
1961
@command('debugrebuilddirstate|debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                             'the working copy parent')),
    ],
    _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        ds = repo.dirstate
        changedfiles = None
        # With --minimal, restrict the rebuild to entries that disagree
        # between the manifest and the dirstate (see command doc above).
        if opts.get(r'minimal'):
            inmanifest = set(ctx.manifest().keys())
            indirstate = set(ds)
            # Files the manifest has but the dirstate is missing...
            manifestonly = inmanifest - indirstate
            # ...plus dirstate-only files, excluding pending adds ('a').
            dsnotadded = {f for f in indirstate - inmanifest if ds[f] != 'a'}
            changedfiles = manifestonly | dsnotadded

        # changedfiles=None means "rebuild everything".
        ds.rebuild(ctx.node(), ctx.manifest(), changedfiles)
1999
1999
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # Thin wrapper: the actual rebuild logic lives in the repair module.
    repair.rebuildfncache(ui, repo)
2004
2004
@command('debugrename',
    [('r', 'rev', '', _('revision to debug'), _('REV'))],
    _('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, (file1,) + pats, opts)
    # Walk every file matched in the chosen revision and report where
    # its filelog says it was renamed from, if anywhere.
    for abspath in ctx.walk(matcher):
        fctx = ctx[abspath]
        renamed = fctx.filelog().renamed(fctx.filenode())
        relpath = matcher.rel(abspath)
        if not renamed:
            ui.write(_("%s not renamed\n") % relpath)
        else:
            srcpath, srcnode = renamed
            ui.write(_("%s renamed from %s:%s\n")
                     % (relpath, srcpath, hex(srcnode)))
2022
2022
@command('debugrevlog', cmdutil.debugrevlogopts +
    [('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    # --dump: emit one raw index line per revision and return early.
    if opts.get("dump"):
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
                  " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # -1 means "no delta parent": the revision is its own base.
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # Maintain the set of current head revisions incrementally:
            # this rev's parents stop being heads, this rev becomes one.
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    # Decode the revlog version word: low 16 bits are the format number,
    # the rest are feature flags.
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append('inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks ways the "delta" are build
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about delta chain of each revs
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision; each list is a [min, max, total] accumulator
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        # Fold `size` into the [min, max, total] accumulator `l`.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # No delta parent: this revision starts a new chain.
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            # Delta revision: extend the chain of its delta parent.
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            # Span is the on-disk distance the chain covers, base to end.
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                addsize(size, deltasize)
                # Attribute the delta to prev / p1 / p2 / other, counting
                # prev-that-is-also-a-parent separately under numprev.
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, '_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            # First byte of the stored chunk identifies its compression.
            chunktype = bytes(segment[0:1])
        else:
            chunktype = 'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # Convert totals into averages in place (slot 2 of each accumulator).
    # NOTE(review): assumes numrevs > 0 and numfull > 0; an empty revlog
    # would raise ZeroDivisionError here -- confirm whether that matters.
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    # Format-string templates; width is filled in per column below.
    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        # Plain decimal format wide enough for `max`.
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        # Decimal-plus-percentage format wide enough for `max`.
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        # Pair a value with its percentage of `total` (100% when total is 0).
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.write(('format : %d\n') % format)
    ui.write(('flags : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' empty : ') + fmt % pcfmt(numempty, numrevs))
    ui.write((' text : ')
             + fmt % pcfmt(numemptytext, numemptytext + numemptydelta))
    ui.write((' delta : ')
             + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta))
    ui.write((' snapshot : ') + fmt % pcfmt(numfull + numsemi, numrevs))
    for depth in sorted(numsnapdepth):
        ui.write((' lvl-%-3d : ' % depth)
                 + fmt % pcfmt(numsnapdepth[depth], numrevs))
    ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write((' snapshot : ')
             + fmt % pcfmt(fulltotal + semitotal, totalsize))
    for depth in sorted(numsnapdepth):
        ui.write((' lvl-%-3d : ' % depth)
                 + fmt % pcfmt(snaptotal[depth], totalsize))
    ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        # Render a chunk-type byte as a human-readable row label.
        if chunktype == 'empty':
            return ' %s : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return ' 0x%s : ' % hex(chunktype)

    ui.write('\n')
    ui.write(('chunks : ') + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.write(('chunks size : ') + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.write(('avg chain length : ') + fmt % avgchainlen)
    ui.write(('max chain length : ') + fmt % maxchainlen)
    ui.write(('max chain reach : ') + fmt % maxchainspan)
    ui.write(('compression ratio : ') + fmt % compratio)

    # Size statistics only make sense for formats that store sizes (v1+).
    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('inter-snapshot size (min/max/avg) : %d / %d / %d\n')
                 % tuple(semisize))
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.write((' level-%-3d (min/max/avg) : %d / %d / %d\n')
                     % ((depth,) + tuple(snapsizedepth[depth])))
        ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
                                                            numprev))
            ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
                                                            numprev))
            ui.write((' other : ') + fmt2 % pcfmt(numoprev,
                                                  numprev))
        if gdelta:
            # Parent deltas only occur when generaldelta is enabled.
            ui.write(('deltas against p1 : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2 : ')
                     + fmt % pcfmt(nump2, numdeltas))
            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                               numdeltas))
2322
2322
@command('debugrevlogindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlogindex', file_, opts)
    # Only two output layouts are supported: 0 (node-based, with parent
    # nodeids) and 1 (rev-based, with flags and parent revs).
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    # --debug shows full 40-char hashes, otherwise the short form.
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        # Probe the first entry to learn the rendered nodeid width.
        idlen = len(shortfn(r.node(i)))
        break

    # Emit the column header matching the chosen format/verbosity.
    if format == 0:
        if ui.verbose:
            ui.write((" rev offset length linkrev"
                      " %s %s p2\n") % ("nodeid".ljust(idlen),
                                        "p1".ljust(idlen)))
        else:
            ui.write((" rev linkrev %s %s p2\n") % (
                "nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        if ui.verbose:
            ui.write((" rev flag offset length size link p1"
                      " p2 %s\n") % "nodeid".rjust(idlen))
        else:
            ui.write((" rev flag size link p1 p2 %s\n") %
                     "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # Fall back to null parents if the index entry is broken.
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
            else:
                ui.write("% 6d % 7d %s %s %s\n" % (
                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
                    shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    r.linkrev(i), pr[0], pr[1], shortfn(node)))
            else:
                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
                    shortfn(node)))
2387
2387
2388 @command('debugrevspec',
2388 @command('debugrevspec',
2389 [('', 'optimize', None,
2389 [('', 'optimize', None,
2390 _('print parsed tree after optimizing (DEPRECATED)')),
2390 _('print parsed tree after optimizing (DEPRECATED)')),
2391 ('', 'show-revs', True, _('print list of result revisions (default)')),
2391 ('', 'show-revs', True, _('print list of result revisions (default)')),
2392 ('s', 'show-set', None, _('print internal representation of result set')),
2392 ('s', 'show-set', None, _('print internal representation of result set')),
2393 ('p', 'show-stage', [],
2393 ('p', 'show-stage', [],
2394 _('print parsed tree at the given stage'), _('NAME')),
2394 _('print parsed tree at the given stage'), _('NAME')),
2395 ('', 'no-optimized', False, _('evaluate tree without optimization')),
2395 ('', 'no-optimized', False, _('evaluate tree without optimization')),
2396 ('', 'verify-optimized', False, _('verify optimized result')),
2396 ('', 'verify-optimized', False, _('verify optimized result')),
2397 ],
2397 ],
2398 ('REVSPEC'))
2398 ('REVSPEC'))
2399 def debugrevspec(ui, repo, expr, **opts):
2399 def debugrevspec(ui, repo, expr, **opts):
2400 """parse and apply a revision specification
2400 """parse and apply a revision specification
2401
2401
2402 Use -p/--show-stage option to print the parsed tree at the given stages.
2402 Use -p/--show-stage option to print the parsed tree at the given stages.
2403 Use -p all to print tree at every stage.
2403 Use -p all to print tree at every stage.
2404
2404
2405 Use --no-show-revs option with -s or -p to print only the set
2405 Use --no-show-revs option with -s or -p to print only the set
2406 representation or the parsed tree respectively.
2406 representation or the parsed tree respectively.
2407
2407
2408 Use --verify-optimized to compare the optimized result with the unoptimized
2408 Use --verify-optimized to compare the optimized result with the unoptimized
2409 one. Returns 1 if the optimized result differs.
2409 one. Returns 1 if the optimized result differs.
2410 """
2410 """
2411 opts = pycompat.byteskwargs(opts)
2411 opts = pycompat.byteskwargs(opts)
2412 aliases = ui.configitems('revsetalias')
2412 aliases = ui.configitems('revsetalias')
2413 stages = [
2413 stages = [
2414 ('parsed', lambda tree: tree),
2414 ('parsed', lambda tree: tree),
2415 ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
2415 ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
2416 ui.warn)),
2416 ui.warn)),
2417 ('concatenated', revsetlang.foldconcat),
2417 ('concatenated', revsetlang.foldconcat),
2418 ('analyzed', revsetlang.analyze),
2418 ('analyzed', revsetlang.analyze),
2419 ('optimized', revsetlang.optimize),
2419 ('optimized', revsetlang.optimize),
2420 ]
2420 ]
2421 if opts['no_optimized']:
2421 if opts['no_optimized']:
2422 stages = stages[:-1]
2422 stages = stages[:-1]
2423 if opts['verify_optimized'] and opts['no_optimized']:
2423 if opts['verify_optimized'] and opts['no_optimized']:
2424 raise error.Abort(_('cannot use --verify-optimized with '
2424 raise error.Abort(_('cannot use --verify-optimized with '
2425 '--no-optimized'))
2425 '--no-optimized'))
2426 stagenames = set(n for n, f in stages)
2426 stagenames = set(n for n, f in stages)
2427
2427
2428 showalways = set()
2428 showalways = set()
2429 showchanged = set()
2429 showchanged = set()
2430 if ui.verbose and not opts['show_stage']:
2430 if ui.verbose and not opts['show_stage']:
2431 # show parsed tree by --verbose (deprecated)
2431 # show parsed tree by --verbose (deprecated)
2432 showalways.add('parsed')
2432 showalways.add('parsed')
2433 showchanged.update(['expanded', 'concatenated'])
2433 showchanged.update(['expanded', 'concatenated'])
2434 if opts['optimize']:
2434 if opts['optimize']:
2435 showalways.add('optimized')
2435 showalways.add('optimized')
2436 if opts['show_stage'] and opts['optimize']:
2436 if opts['show_stage'] and opts['optimize']:
2437 raise error.Abort(_('cannot use --optimize with --show-stage'))
2437 raise error.Abort(_('cannot use --optimize with --show-stage'))
2438 if opts['show_stage'] == ['all']:
2438 if opts['show_stage'] == ['all']:
2439 showalways.update(stagenames)
2439 showalways.update(stagenames)
2440 else:
2440 else:
2441 for n in opts['show_stage']:
2441 for n in opts['show_stage']:
2442 if n not in stagenames:
2442 if n not in stagenames:
2443 raise error.Abort(_('invalid stage name: %s') % n)
2443 raise error.Abort(_('invalid stage name: %s') % n)
2444 showalways.update(opts['show_stage'])
2444 showalways.update(opts['show_stage'])
2445
2445
2446 treebystage = {}
2446 treebystage = {}
2447 printedtree = None
2447 printedtree = None
2448 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
2448 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
2449 for n, f in stages:
2449 for n, f in stages:
2450 treebystage[n] = tree = f(tree)
2450 treebystage[n] = tree = f(tree)
2451 if n in showalways or (n in showchanged and tree != printedtree):
2451 if n in showalways or (n in showchanged and tree != printedtree):
2452 if opts['show_stage'] or n != 'parsed':
2452 if opts['show_stage'] or n != 'parsed':
2453 ui.write(("* %s:\n") % n)
2453 ui.write(("* %s:\n") % n)
2454 ui.write(revsetlang.prettyformat(tree), "\n")
2454 ui.write(revsetlang.prettyformat(tree), "\n")
2455 printedtree = tree
2455 printedtree = tree
2456
2456
2457 if opts['verify_optimized']:
2457 if opts['verify_optimized']:
2458 arevs = revset.makematcher(treebystage['analyzed'])(repo)
2458 arevs = revset.makematcher(treebystage['analyzed'])(repo)
2459 brevs = revset.makematcher(treebystage['optimized'])(repo)
2459 brevs = revset.makematcher(treebystage['optimized'])(repo)
2460 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2460 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2461 ui.write(("* analyzed set:\n"), stringutil.prettyrepr(arevs), "\n")
2461 ui.write(("* analyzed set:\n"), stringutil.prettyrepr(arevs), "\n")
2462 ui.write(("* optimized set:\n"), stringutil.prettyrepr(brevs), "\n")
2462 ui.write(("* optimized set:\n"), stringutil.prettyrepr(brevs), "\n")
2463 arevs = list(arevs)
2463 arevs = list(arevs)
2464 brevs = list(brevs)
2464 brevs = list(brevs)
2465 if arevs == brevs:
2465 if arevs == brevs:
2466 return 0
2466 return 0
2467 ui.write(('--- analyzed\n'), label='diff.file_a')
2467 ui.write(('--- analyzed\n'), label='diff.file_a')
2468 ui.write(('+++ optimized\n'), label='diff.file_b')
2468 ui.write(('+++ optimized\n'), label='diff.file_b')
2469 sm = difflib.SequenceMatcher(None, arevs, brevs)
2469 sm = difflib.SequenceMatcher(None, arevs, brevs)
2470 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
2470 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
2471 if tag in ('delete', 'replace'):
2471 if tag in ('delete', 'replace'):
2472 for c in arevs[alo:ahi]:
2472 for c in arevs[alo:ahi]:
2473 ui.write('-%s\n' % c, label='diff.deleted')
2473 ui.write('-%s\n' % c, label='diff.deleted')
2474 if tag in ('insert', 'replace'):
2474 if tag in ('insert', 'replace'):
2475 for c in brevs[blo:bhi]:
2475 for c in brevs[blo:bhi]:
2476 ui.write('+%s\n' % c, label='diff.inserted')
2476 ui.write('+%s\n' % c, label='diff.inserted')
2477 if tag == 'equal':
2477 if tag == 'equal':
2478 for c in arevs[alo:ahi]:
2478 for c in arevs[alo:ahi]:
2479 ui.write(' %s\n' % c)
2479 ui.write(' %s\n' % c)
2480 return 1
2480 return 1
2481
2481
2482 func = revset.makematcher(tree)
2482 func = revset.makematcher(tree)
2483 revs = func(repo)
2483 revs = func(repo)
2484 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2484 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2485 ui.write(("* set:\n"), stringutil.prettyrepr(revs), "\n")
2485 ui.write(("* set:\n"), stringutil.prettyrepr(revs), "\n")
2486 if not opts['show_revs']:
2486 if not opts['show_revs']:
2487 return
2487 return
2488 for c in revs:
2488 for c in revs:
2489 ui.write("%d\n" % c)
2489 ui.write("%d\n" % c)
2490
2490
2491 @command('debugserve', [
2491 @command('debugserve', [
2492 ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
2492 ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
2493 ('', 'logiofd', '', _('file descriptor to log server I/O to')),
2493 ('', 'logiofd', '', _('file descriptor to log server I/O to')),
2494 ('', 'logiofile', '', _('file to log server I/O to')),
2494 ('', 'logiofile', '', _('file to log server I/O to')),
2495 ], '')
2495 ], '')
2496 def debugserve(ui, repo, **opts):
2496 def debugserve(ui, repo, **opts):
2497 """run a server with advanced settings
2497 """run a server with advanced settings
2498
2498
2499 This command is similar to :hg:`serve`. It exists partially as a
2499 This command is similar to :hg:`serve`. It exists partially as a
2500 workaround to the fact that ``hg serve --stdio`` must have specific
2500 workaround to the fact that ``hg serve --stdio`` must have specific
2501 arguments for security reasons.
2501 arguments for security reasons.
2502 """
2502 """
2503 opts = pycompat.byteskwargs(opts)
2503 opts = pycompat.byteskwargs(opts)
2504
2504
2505 if not opts['sshstdio']:
2505 if not opts['sshstdio']:
2506 raise error.Abort(_('only --sshstdio is currently supported'))
2506 raise error.Abort(_('only --sshstdio is currently supported'))
2507
2507
2508 logfh = None
2508 logfh = None
2509
2509
2510 if opts['logiofd'] and opts['logiofile']:
2510 if opts['logiofd'] and opts['logiofile']:
2511 raise error.Abort(_('cannot use both --logiofd and --logiofile'))
2511 raise error.Abort(_('cannot use both --logiofd and --logiofile'))
2512
2512
2513 if opts['logiofd']:
2513 if opts['logiofd']:
2514 # Line buffered because output is line based.
2514 # Line buffered because output is line based.
2515 try:
2515 try:
2516 logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
2516 logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
2517 except OSError as e:
2517 except OSError as e:
2518 if e.errno != errno.ESPIPE:
2518 if e.errno != errno.ESPIPE:
2519 raise
2519 raise
2520 # can't seek a pipe, so `ab` mode fails on py3
2520 # can't seek a pipe, so `ab` mode fails on py3
2521 logfh = os.fdopen(int(opts['logiofd']), r'wb', 1)
2521 logfh = os.fdopen(int(opts['logiofd']), r'wb', 1)
2522 elif opts['logiofile']:
2522 elif opts['logiofile']:
2523 logfh = open(opts['logiofile'], 'ab', 1)
2523 logfh = open(opts['logiofile'], 'ab', 1)
2524
2524
2525 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
2525 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
2526 s.serve_forever()
2526 s.serve_forever()
2527
2527
2528 @command('debugsetparents', [], _('REV1 [REV2]'))
2528 @command('debugsetparents', [], _('REV1 [REV2]'))
2529 def debugsetparents(ui, repo, rev1, rev2=None):
2529 def debugsetparents(ui, repo, rev1, rev2=None):
2530 """manually set the parents of the current working directory
2530 """manually set the parents of the current working directory
2531
2531
2532 This is useful for writing repository conversion tools, but should
2532 This is useful for writing repository conversion tools, but should
2533 be used with care. For example, neither the working directory nor the
2533 be used with care. For example, neither the working directory nor the
2534 dirstate is updated, so file status may be incorrect after running this
2534 dirstate is updated, so file status may be incorrect after running this
2535 command.
2535 command.
2536
2536
2537 Returns 0 on success.
2537 Returns 0 on success.
2538 """
2538 """
2539
2539
2540 node1 = scmutil.revsingle(repo, rev1).node()
2540 node1 = scmutil.revsingle(repo, rev1).node()
2541 node2 = scmutil.revsingle(repo, rev2, 'null').node()
2541 node2 = scmutil.revsingle(repo, rev2, 'null').node()
2542
2542
2543 with repo.wlock():
2543 with repo.wlock():
2544 repo.setparents(node1, node2)
2544 repo.setparents(node1, node2)
2545
2545
2546 @command('debugssl', [], '[SOURCE]', optionalrepo=True)
2546 @command('debugssl', [], '[SOURCE]', optionalrepo=True)
2547 def debugssl(ui, repo, source=None, **opts):
2547 def debugssl(ui, repo, source=None, **opts):
2548 '''test a secure connection to a server
2548 '''test a secure connection to a server
2549
2549
2550 This builds the certificate chain for the server on Windows, installing the
2550 This builds the certificate chain for the server on Windows, installing the
2551 missing intermediates and trusted root via Windows Update if necessary. It
2551 missing intermediates and trusted root via Windows Update if necessary. It
2552 does nothing on other platforms.
2552 does nothing on other platforms.
2553
2553
2554 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
2554 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
2555 that server is used. See :hg:`help urls` for more information.
2555 that server is used. See :hg:`help urls` for more information.
2556
2556
2557 If the update succeeds, retry the original operation. Otherwise, the cause
2557 If the update succeeds, retry the original operation. Otherwise, the cause
2558 of the SSL error is likely another issue.
2558 of the SSL error is likely another issue.
2559 '''
2559 '''
2560 if not pycompat.iswindows:
2560 if not pycompat.iswindows:
2561 raise error.Abort(_('certificate chain building is only possible on '
2561 raise error.Abort(_('certificate chain building is only possible on '
2562 'Windows'))
2562 'Windows'))
2563
2563
2564 if not source:
2564 if not source:
2565 if not repo:
2565 if not repo:
2566 raise error.Abort(_("there is no Mercurial repository here, and no "
2566 raise error.Abort(_("there is no Mercurial repository here, and no "
2567 "server specified"))
2567 "server specified"))
2568 source = "default"
2568 source = "default"
2569
2569
2570 source, branches = hg.parseurl(ui.expandpath(source))
2570 source, branches = hg.parseurl(ui.expandpath(source))
2571 url = util.url(source)
2571 url = util.url(source)
2572 addr = None
2572 addr = None
2573
2573
2574 defaultport = {'https': 443, 'ssh': 22}
2574 defaultport = {'https': 443, 'ssh': 22}
2575 if url.scheme in defaultport:
2575 if url.scheme in defaultport:
2576 try:
2576 try:
2577 addr = (url.host, int(url.port or defaultport[url.scheme]))
2577 addr = (url.host, int(url.port or defaultport[url.scheme]))
2578 except ValueError:
2578 except ValueError:
2579 raise error.Abort(_("malformed port number in URL"))
2579 raise error.Abort(_("malformed port number in URL"))
2580 else:
2580 else:
2581 raise error.Abort(_("only https and ssh connections are supported"))
2581 raise error.Abort(_("only https and ssh connections are supported"))
2582
2582
2583 from . import win32
2583 from . import win32
2584
2584
2585 s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
2585 s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
2586 cert_reqs=ssl.CERT_NONE, ca_certs=None)
2586 cert_reqs=ssl.CERT_NONE, ca_certs=None)
2587
2587
2588 try:
2588 try:
2589 s.connect(addr)
2589 s.connect(addr)
2590 cert = s.getpeercert(True)
2590 cert = s.getpeercert(True)
2591
2591
2592 ui.status(_('checking the certificate chain for %s\n') % url.host)
2592 ui.status(_('checking the certificate chain for %s\n') % url.host)
2593
2593
2594 complete = win32.checkcertificatechain(cert, build=False)
2594 complete = win32.checkcertificatechain(cert, build=False)
2595
2595
2596 if not complete:
2596 if not complete:
2597 ui.status(_('certificate chain is incomplete, updating... '))
2597 ui.status(_('certificate chain is incomplete, updating... '))
2598
2598
2599 if not win32.checkcertificatechain(cert):
2599 if not win32.checkcertificatechain(cert):
2600 ui.status(_('failed.\n'))
2600 ui.status(_('failed.\n'))
2601 else:
2601 else:
2602 ui.status(_('done.\n'))
2602 ui.status(_('done.\n'))
2603 else:
2603 else:
2604 ui.status(_('full certificate chain is available\n'))
2604 ui.status(_('full certificate chain is available\n'))
2605 finally:
2605 finally:
2606 s.close()
2606 s.close()
2607
2607
2608 @command('debugsub',
2608 @command('debugsub',
2609 [('r', 'rev', '',
2609 [('r', 'rev', '',
2610 _('revision to check'), _('REV'))],
2610 _('revision to check'), _('REV'))],
2611 _('[-r REV] [REV]'))
2611 _('[-r REV] [REV]'))
2612 def debugsub(ui, repo, rev=None):
2612 def debugsub(ui, repo, rev=None):
2613 ctx = scmutil.revsingle(repo, rev, None)
2613 ctx = scmutil.revsingle(repo, rev, None)
2614 for k, v in sorted(ctx.substate.items()):
2614 for k, v in sorted(ctx.substate.items()):
2615 ui.write(('path %s\n') % k)
2615 ui.write(('path %s\n') % k)
2616 ui.write((' source %s\n') % v[0])
2616 ui.write((' source %s\n') % v[0])
2617 ui.write((' revision %s\n') % v[1])
2617 ui.write((' revision %s\n') % v[1])
2618
2618
2619 @command('debugsuccessorssets',
2619 @command('debugsuccessorssets',
2620 [('', 'closest', False, _('return closest successors sets only'))],
2620 [('', 'closest', False, _('return closest successors sets only'))],
2621 _('[REV]'))
2621 _('[REV]'))
2622 def debugsuccessorssets(ui, repo, *revs, **opts):
2622 def debugsuccessorssets(ui, repo, *revs, **opts):
2623 """show set of successors for revision
2623 """show set of successors for revision
2624
2624
2625 A successors set of changeset A is a consistent group of revisions that
2625 A successors set of changeset A is a consistent group of revisions that
2626 succeed A. It contains non-obsolete changesets only unless closests
2626 succeed A. It contains non-obsolete changesets only unless closests
2627 successors set is set.
2627 successors set is set.
2628
2628
2629 In most cases a changeset A has a single successors set containing a single
2629 In most cases a changeset A has a single successors set containing a single
2630 successor (changeset A replaced by A').
2630 successor (changeset A replaced by A').
2631
2631
2632 A changeset that is made obsolete with no successors are called "pruned".
2632 A changeset that is made obsolete with no successors are called "pruned".
2633 Such changesets have no successors sets at all.
2633 Such changesets have no successors sets at all.
2634
2634
2635 A changeset that has been "split" will have a successors set containing
2635 A changeset that has been "split" will have a successors set containing
2636 more than one successor.
2636 more than one successor.
2637
2637
2638 A changeset that has been rewritten in multiple different ways is called
2638 A changeset that has been rewritten in multiple different ways is called
2639 "divergent". Such changesets have multiple successor sets (each of which
2639 "divergent". Such changesets have multiple successor sets (each of which
2640 may also be split, i.e. have multiple successors).
2640 may also be split, i.e. have multiple successors).
2641
2641
2642 Results are displayed as follows::
2642 Results are displayed as follows::
2643
2643
2644 <rev1>
2644 <rev1>
2645 <successors-1A>
2645 <successors-1A>
2646 <rev2>
2646 <rev2>
2647 <successors-2A>
2647 <successors-2A>
2648 <successors-2B1> <successors-2B2> <successors-2B3>
2648 <successors-2B1> <successors-2B2> <successors-2B3>
2649
2649
2650 Here rev2 has two possible (i.e. divergent) successors sets. The first
2650 Here rev2 has two possible (i.e. divergent) successors sets. The first
2651 holds one element, whereas the second holds three (i.e. the changeset has
2651 holds one element, whereas the second holds three (i.e. the changeset has
2652 been split).
2652 been split).
2653 """
2653 """
2654 # passed to successorssets caching computation from one call to another
2654 # passed to successorssets caching computation from one call to another
2655 cache = {}
2655 cache = {}
2656 ctx2str = bytes
2656 ctx2str = bytes
2657 node2str = short
2657 node2str = short
2658 for rev in scmutil.revrange(repo, revs):
2658 for rev in scmutil.revrange(repo, revs):
2659 ctx = repo[rev]
2659 ctx = repo[rev]
2660 ui.write('%s\n'% ctx2str(ctx))
2660 ui.write('%s\n'% ctx2str(ctx))
2661 for succsset in obsutil.successorssets(repo, ctx.node(),
2661 for succsset in obsutil.successorssets(repo, ctx.node(),
2662 closest=opts[r'closest'],
2662 closest=opts[r'closest'],
2663 cache=cache):
2663 cache=cache):
2664 if succsset:
2664 if succsset:
2665 ui.write(' ')
2665 ui.write(' ')
2666 ui.write(node2str(succsset[0]))
2666 ui.write(node2str(succsset[0]))
2667 for node in succsset[1:]:
2667 for node in succsset[1:]:
2668 ui.write(' ')
2668 ui.write(' ')
2669 ui.write(node2str(node))
2669 ui.write(node2str(node))
2670 ui.write('\n')
2670 ui.write('\n')
2671
2671
2672 @command('debugtemplate',
2672 @command('debugtemplate',
2673 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2673 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2674 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2674 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2675 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2675 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2676 optionalrepo=True)
2676 optionalrepo=True)
2677 def debugtemplate(ui, repo, tmpl, **opts):
2677 def debugtemplate(ui, repo, tmpl, **opts):
2678 """parse and apply a template
2678 """parse and apply a template
2679
2679
2680 If -r/--rev is given, the template is processed as a log template and
2680 If -r/--rev is given, the template is processed as a log template and
2681 applied to the given changesets. Otherwise, it is processed as a generic
2681 applied to the given changesets. Otherwise, it is processed as a generic
2682 template.
2682 template.
2683
2683
2684 Use --verbose to print the parsed tree.
2684 Use --verbose to print the parsed tree.
2685 """
2685 """
2686 revs = None
2686 revs = None
2687 if opts[r'rev']:
2687 if opts[r'rev']:
2688 if repo is None:
2688 if repo is None:
2689 raise error.RepoError(_('there is no Mercurial repository here '
2689 raise error.RepoError(_('there is no Mercurial repository here '
2690 '(.hg not found)'))
2690 '(.hg not found)'))
2691 revs = scmutil.revrange(repo, opts[r'rev'])
2691 revs = scmutil.revrange(repo, opts[r'rev'])
2692
2692
2693 props = {}
2693 props = {}
2694 for d in opts[r'define']:
2694 for d in opts[r'define']:
2695 try:
2695 try:
2696 k, v = (e.strip() for e in d.split('=', 1))
2696 k, v = (e.strip() for e in d.split('=', 1))
2697 if not k or k == 'ui':
2697 if not k or k == 'ui':
2698 raise ValueError
2698 raise ValueError
2699 props[k] = v
2699 props[k] = v
2700 except ValueError:
2700 except ValueError:
2701 raise error.Abort(_('malformed keyword definition: %s') % d)
2701 raise error.Abort(_('malformed keyword definition: %s') % d)
2702
2702
2703 if ui.verbose:
2703 if ui.verbose:
2704 aliases = ui.configitems('templatealias')
2704 aliases = ui.configitems('templatealias')
2705 tree = templater.parse(tmpl)
2705 tree = templater.parse(tmpl)
2706 ui.note(templater.prettyformat(tree), '\n')
2706 ui.note(templater.prettyformat(tree), '\n')
2707 newtree = templater.expandaliases(tree, aliases)
2707 newtree = templater.expandaliases(tree, aliases)
2708 if newtree != tree:
2708 if newtree != tree:
2709 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2709 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2710
2710
2711 if revs is None:
2711 if revs is None:
2712 tres = formatter.templateresources(ui, repo)
2712 tres = formatter.templateresources(ui, repo)
2713 t = formatter.maketemplater(ui, tmpl, resources=tres)
2713 t = formatter.maketemplater(ui, tmpl, resources=tres)
2714 if ui.verbose:
2714 if ui.verbose:
2715 kwds, funcs = t.symbolsuseddefault()
2715 kwds, funcs = t.symbolsuseddefault()
2716 ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
2716 ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
2717 ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
2717 ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
2718 ui.write(t.renderdefault(props))
2718 ui.write(t.renderdefault(props))
2719 else:
2719 else:
2720 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
2720 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
2721 if ui.verbose:
2721 if ui.verbose:
2722 kwds, funcs = displayer.t.symbolsuseddefault()
2722 kwds, funcs = displayer.t.symbolsuseddefault()
2723 ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
2723 ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
2724 ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
2724 ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
2725 for r in revs:
2725 for r in revs:
2726 displayer.show(repo[r], **pycompat.strkwargs(props))
2726 displayer.show(repo[r], **pycompat.strkwargs(props))
2727 displayer.close()
2727 displayer.close()
2728
2728
2729 @command('debuguigetpass', [
2729 @command('debuguigetpass', [
2730 ('p', 'prompt', '', _('prompt text'), _('TEXT')),
2730 ('p', 'prompt', '', _('prompt text'), _('TEXT')),
2731 ], _('[-p TEXT]'), norepo=True)
2731 ], _('[-p TEXT]'), norepo=True)
2732 def debuguigetpass(ui, prompt=''):
2732 def debuguigetpass(ui, prompt=''):
2733 """show prompt to type password"""
2733 """show prompt to type password"""
2734 r = ui.getpass(prompt)
2734 r = ui.getpass(prompt)
2735 ui.write(('respose: %s\n') % r)
2735 ui.write(('respose: %s\n') % r)
2736
2736
2737 @command('debuguiprompt', [
2737 @command('debuguiprompt', [
2738 ('p', 'prompt', '', _('prompt text'), _('TEXT')),
2738 ('p', 'prompt', '', _('prompt text'), _('TEXT')),
2739 ], _('[-p TEXT]'), norepo=True)
2739 ], _('[-p TEXT]'), norepo=True)
2740 def debuguiprompt(ui, prompt=''):
2740 def debuguiprompt(ui, prompt=''):
2741 """show plain prompt"""
2741 """show plain prompt"""
2742 r = ui.prompt(prompt)
2742 r = ui.prompt(prompt)
2743 ui.write(('response: %s\n') % r)
2743 ui.write(('response: %s\n') % r)
2744
2744
2745 @command('debugupdatecaches', [])
2745 @command('debugupdatecaches', [])
2746 def debugupdatecaches(ui, repo, *pats, **opts):
2746 def debugupdatecaches(ui, repo, *pats, **opts):
2747 """warm all known caches in the repository"""
2747 """warm all known caches in the repository"""
2748 with repo.wlock(), repo.lock():
2748 with repo.wlock(), repo.lock():
2749 repo.updatecaches(full=True)
2749 repo.updatecaches(full=True)
2750
2750
2751 @command('debugupgraderepo', [
2751 @command('debugupgraderepo', [
2752 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2752 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2753 ('', 'run', False, _('performs an upgrade')),
2753 ('', 'run', False, _('performs an upgrade')),
2754 ('', 'backup', True, _('keep the old repository content around')),
2754 ])
2755 ])
2755 def debugupgraderepo(ui, repo, run=False, optimize=None):
2756 def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True):
2756 """upgrade a repository to use different features
2757 """upgrade a repository to use different features
2757
2758
2758 If no arguments are specified, the repository is evaluated for upgrade
2759 If no arguments are specified, the repository is evaluated for upgrade
2759 and a list of problems and potential optimizations is printed.
2760 and a list of problems and potential optimizations is printed.
2760
2761
2761 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2762 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2762 can be influenced via additional arguments. More details will be provided
2763 can be influenced via additional arguments. More details will be provided
2763 by the command output when run without ``--run``.
2764 by the command output when run without ``--run``.
2764
2765
2765 During the upgrade, the repository will be locked and no writes will be
2766 During the upgrade, the repository will be locked and no writes will be
2766 allowed.
2767 allowed.
2767
2768
2768 At the end of the upgrade, the repository may not be readable while new
2769 At the end of the upgrade, the repository may not be readable while new
2769 repository data is swapped in. This window will be as long as it takes to
2770 repository data is swapped in. This window will be as long as it takes to
2770 rename some directories inside the ``.hg`` directory. On most machines, this
2771 rename some directories inside the ``.hg`` directory. On most machines, this
2771 should complete almost instantaneously and the chances of a consumer being
2772 should complete almost instantaneously and the chances of a consumer being
2772 unable to access the repository should be low.
2773 unable to access the repository should be low.
2773 """
2774 """
2774 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2775 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize,
2776 backup=backup)
2775
2777
2776 @command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
2778 @command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
2777 inferrepo=True)
2779 inferrepo=True)
2778 def debugwalk(ui, repo, *pats, **opts):
2780 def debugwalk(ui, repo, *pats, **opts):
2779 """show how files match on given patterns"""
2781 """show how files match on given patterns"""
2780 opts = pycompat.byteskwargs(opts)
2782 opts = pycompat.byteskwargs(opts)
2781 m = scmutil.match(repo[None], pats, opts)
2783 m = scmutil.match(repo[None], pats, opts)
2782 if ui.verbose:
2784 if ui.verbose:
2783 ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
2785 ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
2784 items = list(repo[None].walk(m))
2786 items = list(repo[None].walk(m))
2785 if not items:
2787 if not items:
2786 return
2788 return
2787 f = lambda fn: fn
2789 f = lambda fn: fn
2788 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2790 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2789 f = lambda fn: util.normpath(fn)
2791 f = lambda fn: util.normpath(fn)
2790 fmt = 'f %%-%ds %%-%ds %%s' % (
2792 fmt = 'f %%-%ds %%-%ds %%s' % (
2791 max([len(abs) for abs in items]),
2793 max([len(abs) for abs in items]),
2792 max([len(m.rel(abs)) for abs in items]))
2794 max([len(m.rel(abs)) for abs in items]))
2793 for abs in items:
2795 for abs in items:
2794 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2796 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2795 ui.write("%s\n" % line.rstrip())
2797 ui.write("%s\n" % line.rstrip())
2796
2798
2797 @command('debugwhyunstable', [], _('REV'))
2799 @command('debugwhyunstable', [], _('REV'))
2798 def debugwhyunstable(ui, repo, rev):
2800 def debugwhyunstable(ui, repo, rev):
2799 """explain instabilities of a changeset"""
2801 """explain instabilities of a changeset"""
2800 for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
2802 for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
2801 dnodes = ''
2803 dnodes = ''
2802 if entry.get('divergentnodes'):
2804 if entry.get('divergentnodes'):
2803 dnodes = ' '.join('%s (%s)' % (ctx.hex(), ctx.phasestr())
2805 dnodes = ' '.join('%s (%s)' % (ctx.hex(), ctx.phasestr())
2804 for ctx in entry['divergentnodes']) + ' '
2806 for ctx in entry['divergentnodes']) + ' '
2805 ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
2807 ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
2806 entry['reason'], entry['node']))
2808 entry['reason'], entry['node']))
2807
2809
2808 @command('debugwireargs',
2810 @command('debugwireargs',
2809 [('', 'three', '', 'three'),
2811 [('', 'three', '', 'three'),
2810 ('', 'four', '', 'four'),
2812 ('', 'four', '', 'four'),
2811 ('', 'five', '', 'five'),
2813 ('', 'five', '', 'five'),
2812 ] + cmdutil.remoteopts,
2814 ] + cmdutil.remoteopts,
2813 _('REPO [OPTIONS]... [ONE [TWO]]'),
2815 _('REPO [OPTIONS]... [ONE [TWO]]'),
2814 norepo=True)
2816 norepo=True)
2815 def debugwireargs(ui, repopath, *vals, **opts):
2817 def debugwireargs(ui, repopath, *vals, **opts):
2816 opts = pycompat.byteskwargs(opts)
2818 opts = pycompat.byteskwargs(opts)
2817 repo = hg.peer(ui, opts, repopath)
2819 repo = hg.peer(ui, opts, repopath)
2818 for opt in cmdutil.remoteopts:
2820 for opt in cmdutil.remoteopts:
2819 del opts[opt[1]]
2821 del opts[opt[1]]
2820 args = {}
2822 args = {}
2821 for k, v in opts.iteritems():
2823 for k, v in opts.iteritems():
2822 if v:
2824 if v:
2823 args[k] = v
2825 args[k] = v
2824 args = pycompat.strkwargs(args)
2826 args = pycompat.strkwargs(args)
2825 # run twice to check that we don't mess up the stream for the next command
2827 # run twice to check that we don't mess up the stream for the next command
2826 res1 = repo.debugwireargs(*vals, **args)
2828 res1 = repo.debugwireargs(*vals, **args)
2827 res2 = repo.debugwireargs(*vals, **args)
2829 res2 = repo.debugwireargs(*vals, **args)
2828 ui.write("%s\n" % res1)
2830 ui.write("%s\n" % res1)
2829 if res1 != res2:
2831 if res1 != res2:
2830 ui.warn("%s\n" % res2)
2832 ui.warn("%s\n" % res2)
2831
2833
2832 def _parsewirelangblocks(fh):
2834 def _parsewirelangblocks(fh):
2833 activeaction = None
2835 activeaction = None
2834 blocklines = []
2836 blocklines = []
2835 lastindent = 0
2837 lastindent = 0
2836
2838
2837 for line in fh:
2839 for line in fh:
2838 line = line.rstrip()
2840 line = line.rstrip()
2839 if not line:
2841 if not line:
2840 continue
2842 continue
2841
2843
2842 if line.startswith(b'#'):
2844 if line.startswith(b'#'):
2843 continue
2845 continue
2844
2846
2845 if not line.startswith(b' '):
2847 if not line.startswith(b' '):
2846 # New block. Flush previous one.
2848 # New block. Flush previous one.
2847 if activeaction:
2849 if activeaction:
2848 yield activeaction, blocklines
2850 yield activeaction, blocklines
2849
2851
2850 activeaction = line
2852 activeaction = line
2851 blocklines = []
2853 blocklines = []
2852 lastindent = 0
2854 lastindent = 0
2853 continue
2855 continue
2854
2856
2855 # Else we start with an indent.
2857 # Else we start with an indent.
2856
2858
2857 if not activeaction:
2859 if not activeaction:
2858 raise error.Abort(_('indented line outside of block'))
2860 raise error.Abort(_('indented line outside of block'))
2859
2861
2860 indent = len(line) - len(line.lstrip())
2862 indent = len(line) - len(line.lstrip())
2861
2863
2862 # If this line is indented more than the last line, concatenate it.
2864 # If this line is indented more than the last line, concatenate it.
2863 if indent > lastindent and blocklines:
2865 if indent > lastindent and blocklines:
2864 blocklines[-1] += line.lstrip()
2866 blocklines[-1] += line.lstrip()
2865 else:
2867 else:
2866 blocklines.append(line)
2868 blocklines.append(line)
2867 lastindent = indent
2869 lastindent = indent
2868
2870
2869 # Flush last block.
2871 # Flush last block.
2870 if activeaction:
2872 if activeaction:
2871 yield activeaction, blocklines
2873 yield activeaction, blocklines
2872
2874
@command('debugwireproto',
    [
        ('', 'localssh', False, _('start an SSH server for this repo')),
        ('', 'peer', '', _('construct a specific version of the peer')),
        ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
        ('', 'nologhandshake', False,
         _('do not log I/O related to the peer handshake')),
    ] + cmdutil.remoteopts,
    _('[PATH]'),
    optionalrepo=True)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
    raw data payloads and don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is specified via stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.

    raw+
    ----

    Behaves like ``raw`` except flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

        command listkeys
            namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` N bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` N bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit a *Unified Frame-Based Protocol* by using special
    syntax.

    A frame is composed as a type, flags, and payload. These can be parsed
    from a string of the form:

       <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
    """
    opts = pycompat.byteskwargs(opts)

    if opts['localssh'] and not repo:
        raise error.Abort(_('--localssh requires a repository'))

    if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
        raise error.Abort(_('invalid value for --peer'),
                          hint=_('valid values are "raw", "ssh1", and "ssh2"'))

    if path and opts['localssh']:
        raise error.Abort(_('cannot specify --localssh with an explicit '
                            'path'))

    if ui.interactive():
        ui.write(_('(waiting for commands on stdin)\n'))

    blocks = list(_parsewirelangblocks(ui.fin))

    proc = None
    stdin = None
    stdout = None
    stderr = None
    opener = None

    if opts['localssh']:
        # We start the SSH server in its own process so there is process
        # separation. This prevents a whole class of potential bugs around
        # shared state from interfering with server operation.
        args = procutil.hgcmd() + [
            '-R', repo.root,
            'debugserve', '--sshstdio',
        ]
        proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                bufsize=0)

        stdin = proc.stdin
        stdout = proc.stdout
        stderr = proc.stderr

        # We turn the pipes into observers so we can log I/O.
        if ui.verbose or opts['peer'] == 'raw':
            stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
                                               logdata=True)
            stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
                                                logdata=True)
            stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
                                                logdata=True)

        # --localssh also implies the peer connection settings.

        url = 'ssh://localserver'
        autoreadstderr = not opts['noreadstderr']

        if opts['peer'] == 'ssh1':
            ui.write(_('creating ssh peer for wire protocol version 1\n'))
            peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
                                     None, autoreadstderr=autoreadstderr)
        elif opts['peer'] == 'ssh2':
            ui.write(_('creating ssh peer for wire protocol version 2\n'))
            peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
                                     None, autoreadstderr=autoreadstderr)
        elif opts['peer'] == 'raw':
            ui.write(_('using raw connection to peer\n'))
            peer = None
        else:
            ui.write(_('creating ssh peer from handshake results\n'))
            peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
                                    autoreadstderr=autoreadstderr)

    elif path:
        # We bypass hg.peer() so we can proxy the sockets.
        # TODO consider not doing this because we skip
        # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
        u = util.url(path)
        if u.scheme != 'http':
            raise error.Abort(_('only http:// paths are currently supported'))

        url, authinfo = u.authinfo()
        openerargs = {
            r'useragent': b'Mercurial debugwireproto',
        }

        # Turn pipes/sockets into observers so we can log I/O.
        if ui.verbose:
            openerargs.update({
                r'loggingfh': ui,
                r'loggingname': b's',
                r'loggingopts': {
                    r'logdata': True,
                    r'logdataapis': False,
                },
            })

        if ui.debugflag:
            openerargs[r'loggingopts'][r'logdataapis'] = True

        # Don't send default headers when in raw mode. This allows us to
        # bypass most of the behavior of our URL handling code so we can
        # have near complete control over what's sent on the wire.
        if opts['peer'] == 'raw':
            openerargs[r'sendaccept'] = False

        opener = urlmod.opener(ui, authinfo, **openerargs)

        if opts['peer'] == 'http2':
            ui.write(_('creating http peer for wire protocol version 2\n'))
            # We go through makepeer() because we need an API descriptor for
            # the peer instance to be useful.
            with ui.configoverride({
                ('experimental', 'httppeer.advertise-v2'): True}):
                if opts['nologhandshake']:
                    ui.pushbuffer()

                peer = httppeer.makepeer(ui, path, opener=opener)

                if opts['nologhandshake']:
                    ui.popbuffer()

            if not isinstance(peer, httppeer.httpv2peer):
                raise error.Abort(_('could not instantiate HTTP peer for '
                                    'wire protocol version 2'),
                                  hint=_('the server may not have the feature '
                                         'enabled or is not allowing this '
                                         'client version'))

        elif opts['peer'] == 'raw':
            ui.write(_('using raw connection to peer\n'))
            peer = None
        elif opts['peer']:
            raise error.Abort(_('--peer %s not supported with HTTP peers') %
                              opts['peer'])
        else:
            peer = httppeer.makepeer(ui, path, opener=opener)

        # We /could/ populate stdin/stdout with sock.makefile()...
    else:
        raise error.Abort(_('unsupported connection configuration'))

    batchedcommands = None

    # Now perform actions based on the parsed wire language instructions.
    for action, lines in blocks:
        if action in ('raw', 'raw+'):
            if not stdin:
                raise error.Abort(_('cannot call raw/raw+ on this peer'))

            # Concatenate the data together.
            data = ''.join(l.lstrip() for l in lines)
            data = stringutil.unescapestr(data)
            stdin.write(data)

            if action == 'raw+':
                stdin.flush()
        elif action == 'flush':
            if not stdin:
                raise error.Abort(_('cannot call flush on this peer'))
            stdin.flush()
        elif action.startswith('command'):
            if not peer:
                raise error.Abort(_('cannot send commands unless peer instance '
                                    'is available'))

            command = action.split(' ', 1)[1]

            args = {}
            for line in lines:
                # We need to allow empty values.
                fields = line.lstrip().split(' ', 1)
                if len(fields) == 1:
                    key = fields[0]
                    value = ''
                else:
                    key, value = fields

                if value.startswith('eval:'):
                    value = stringutil.evalpythonliteral(value[5:])
                else:
                    value = stringutil.unescapestr(value)

                args[key] = value

            if batchedcommands is not None:
                batchedcommands.append((command, args))
                continue

            ui.status(_('sending %s command\n') % command)

            if 'PUSHFILE' in args:
                with open(args['PUSHFILE'], r'rb') as fh:
                    del args['PUSHFILE']
                    res, output = peer._callpush(command, fh,
                                                 **pycompat.strkwargs(args))
                ui.status(_('result: %s\n') % stringutil.escapestr(res))
                ui.status(_('remote output: %s\n') %
                          stringutil.escapestr(output))
            else:
                with peer.commandexecutor() as e:
                    res = e.callcommand(command, args).result()

                if isinstance(res, wireprotov2peer.commandresponse):
                    val = res.objects()
                    ui.status(_('response: %s\n') %
                              stringutil.pprint(val, bprefix=True, indent=2))
                else:
                    ui.status(_('response: %s\n') %
                              stringutil.pprint(res, bprefix=True, indent=2))

        elif action == 'batchbegin':
            if batchedcommands is not None:
                raise error.Abort(_('nested batchbegin not allowed'))

            batchedcommands = []
        elif action == 'batchsubmit':
            # There is a batching API we could go through. But it would be
            # difficult to normalize requests into function calls. It is easier
            # to bypass this layer and normalize to commands + args.
            ui.status(_('sending batch with %d sub-commands\n') %
                      len(batchedcommands))
            for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                ui.status(_('response #%d: %s\n') %
                          (i, stringutil.escapestr(chunk)))

            batchedcommands = None

        elif action.startswith('httprequest '):
            if not opener:
                raise error.Abort(_('cannot use httprequest without an HTTP '
                                    'peer'))

            request = action.split(' ', 2)
            if len(request) != 3:
                raise error.Abort(_('invalid httprequest: expected format is '
                                    '"httprequest <method> <path>'))

            method, httppath = request[1:]
            headers = {}
            body = None
            frames = []
            for line in lines:
                line = line.lstrip()
                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                if m:
                    # Headers need to use native strings.
                    key = pycompat.strurl(m.group(1))
                    value = pycompat.strurl(m.group(2))
                    headers[key] = value
                    continue

                if line.startswith(b'BODYFILE '):
                    # split() returns a [keyword, path] list; open() needs the
                    # path element, not the list (passing the list raised
                    # TypeError and made BODYFILE unusable).
                    with open(line.split(b' ', 1)[1], 'rb') as fh:
                        body = fh.read()
                elif line.startswith(b'frame '):
                    frame = wireprotoframing.makeframefromhumanstring(
                        line[len(b'frame '):])

                    frames.append(frame)
                else:
                    raise error.Abort(_('unknown argument to httprequest: %s') %
                                      line)

            url = path + httppath

            if frames:
                body = b''.join(bytes(f) for f in frames)

            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)

            # urllib.Request insists on using has_data() as a proxy for
            # determining the request method. Override that to use our
            # explicitly requested method.
            req.get_method = lambda: pycompat.sysstr(method)

            try:
                res = opener.open(req)
                body = res.read()
            except util.urlerr.urlerror as e:
                # read() method must be called, but only exists in Python 2
                getattr(e, 'read', lambda: None)()
                continue

            ct = res.headers.get(r'Content-Type')
            if ct == r'application/mercurial-cbor':
                ui.write(_('cbor> %s\n') %
                         stringutil.pprint(cborutil.decodeall(body),
                                           bprefix=True,
                                           indent=2))

        elif action == 'close':
            peer.close()
        elif action == 'readavailable':
            if not stdout or not stderr:
                raise error.Abort(_('readavailable not available on this peer'))

            stdin.close()
            stdout.read()
            stderr.read()

        elif action == 'readline':
            if not stdout:
                raise error.Abort(_('readline not available on this peer'))
            stdout.readline()
        elif action == 'ereadline':
            if not stderr:
                raise error.Abort(_('ereadline not available on this peer'))
            stderr.readline()
        elif action.startswith('read '):
            count = int(action.split(' ', 1)[1])
            if not stdout:
                raise error.Abort(_('read not available on this peer'))
            stdout.read(count)
        elif action.startswith('eread '):
            count = int(action.split(' ', 1)[1])
            if not stderr:
                raise error.Abort(_('eread not available on this peer'))
            stderr.read(count)
        else:
            raise error.Abort(_('unknown action: %s') % action)

    if batchedcommands is not None:
        raise error.Abort(_('unclosed "batchbegin" request'))

    if peer:
        peer.close()

    if proc:
        proc.kill()
@@ -1,912 +1,916 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 changelog,
14 changelog,
15 error,
15 error,
16 filelog,
16 filelog,
17 hg,
17 hg,
18 localrepo,
18 localrepo,
19 manifest,
19 manifest,
20 pycompat,
20 pycompat,
21 revlog,
21 revlog,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 vfs as vfsmod,
24 vfs as vfsmod,
25 )
25 )
26
26
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # Both requirements were introduced in Mercurial 0.9.2; repositories
    # predating them are too old for the upgrade machinery to handle.
    return {'revlogv1', 'store'}
39
39
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blockers = set()
    # The upgrade code does not yet support these experimental features.
    # This is an artificial limitation.
    blockers.add('treemanifest')
    # This was a precursor to generaldelta and was never enabled by default.
    # It should (hopefully) not exist in the wild.
    blockers.add('parentdelta')
    # Upgrade should operate on the actual store, not the shared link.
    blockers.add('shared')
    return blockers
56
56
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    return {
        # Currently the only requirement the upgrade code knows how to drop.
        localrepo.SPARSEREVLOG_REQUIREMENT,
    }
67
67
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    # The mandatory requirements (see requiredsourcerequirements) plus the
    # optional format features the upgrade knows how to write out.
    return {
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
        localrepo.SPARSEREVLOG_REQUIREMENT,
    }
84
84
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    # 'revlogv1' and 'store' are absent on purpose: they must already exist
    # in the source (see requiredsourcerequirements), so they are never newly
    # added by an upgrade.
    return {
        'dotencode',
        'fncache',
        'generaldelta',
        localrepo.SPARSEREVLOG_REQUIREMENT,
    }
101
101
def preservedrequirements(repo):
    """Obtain requirements that must be preserved unconditionally.

    Currently empty: the destination requirement set is computed from the
    other requirement functions instead.
    """
    return set()
104
104
# Categories for ``improvement.type`` (see the ``improvement`` class below):
# a ``deficiency`` is an outright problem with the repository, while an
# ``optimisation`` is an optional action that can further improve it.
deficiency = 'deficiency'
optimisation = 'optimization'
107
107
class improvement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.

    Instances compare and hash by ``name`` alone, so two improvements with
    the same name are interchangeable in sets and comparisons.
    """

    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

    def __hash__(self):
        # Must stay consistent with __eq__, which also keys on the name.
        return hash(self.name)

    def __eq__(self, other):
        if not isinstance(other, improvement):
            # Defer to the other operand's comparison, per the data model.
            return NotImplemented
        return self.name == other.name

    def __ne__(self, other):
        return not (self == other)
149
149
# Registry of every known format-variant class; populated by applying the
# ``registerformatvariant`` decorator to each class definition.
allformatvariant = []

def registerformatvariant(cls):
    """Class decorator recording *cls* in ``allformatvariant``."""
    allformatvariant.append(cls)
    return cls
155
155
class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""
    # Format variants are always surfaced as deficiencies when missing.
    type = deficiency
    ### The following attributes should be defined for each class:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it ``deficiency`` types, should be worded
    # in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    def __init__(self):
        # Subclasses are used purely as class-level singletons; they are
        # never instantiated.
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()
189
189
class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variant are controlled by a 'requirement'. We define a small
    subclass to factor the code.
    """

    # the requirement that control this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        """Requirements a brand-new repository would get under *ui*'s config.

        Used as the reference point by ``fromconfig``.
        """
        return localrepo.newreporequirements(
            ui, localrepo.defaultcreateopts(ui))

    @classmethod
    def fromrepo(cls, repo):
        """Whether the requirement is present in the existing repository."""
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        """Whether the current config would enable the requirement."""
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo.ui)
214
214
@registerformatvariant
class fncache(requirementformatvariant):
    # Tracks the 'fncache' store requirement (filename cache).
    name = 'fncache'

    _requirement = 'fncache'

    default = True

    description = _('long and reserved filenames may not work correctly; '
                    'repository performance is sub-optimal')

    upgrademessage = _('repository will be more resilient to storing '
                       'certain paths and performance of certain '
                       'operations should be improved')
229
229
@registerformatvariant
class dotencode(requirementformatvariant):
    # Tracks the 'dotencode' store requirement (escaping of leading
    # '.'/space in stored filenames).
    name = 'dotencode'

    _requirement = 'dotencode'

    default = True

    description = _('storage of filenames beginning with a period or '
                    'space may not work correctly')

    upgrademessage = _('repository will be better able to store files '
                       'beginning with a space or period')
243
243
@registerformatvariant
class generaldelta(requirementformatvariant):
    # Tracks the 'generaldelta' requirement (deltas against an arbitrary
    # parent rather than only the previous revision).
    name = 'generaldelta'

    _requirement = 'generaldelta'

    default = True

    description = _('deltas within internal storage are unable to '
                    'choose optimal revisions; repository is larger and '
                    'slower than it could be; interaction with other '
                    'repositories may require extra network and CPU '
                    'resources, making "hg push" and "hg pull" slower')

    upgrademessage = _('repository storage will be able to create '
                       'optimal deltas; new repository data will be '
                       'smaller and read times should decrease; '
                       'interacting with other repositories using this '
                       'storage model should require less network and '
                       'CPU resources, making "hg push" and "hg pull" '
                       'faster')
265
265
@registerformatvariant
class sparserevlog(requirementformatvariant):
    # Tracks the sparse-revlog requirement (delta chains may contain gaps
    # that are skipped at read time).
    name = 'sparserevlog'

    _requirement = localrepo.SPARSEREVLOG_REQUIREMENT

    default = True

    description = _('in order to limit disk reading and memory usage on older '
                    'version, the span of a delta chain from its root to its '
                    'end is limited, whatever the relevant data in this span. '
                    'This can severly limit Mercurial ability to build good '
                    'chain of delta resulting is much more storage space being '
                    'taken and limit reusability of on disk delta during '
                    'exchange.'
                    )

    upgrademessage = _('Revlog supports delta chain with more unused data '
                       'between payload. These gaps will be skipped at read '
                       'time. This allows for better delta chains, making a '
                       'better compression and faster exchange with server.')
287
287
@registerformatvariant
class removecldeltachain(formatvariant):
    # Not requirement-based: detected by inspecting the changelog itself.
    name = 'plain-cl-delta'

    default = True

    description = _('changelog storage is using deltas instead of '
                    'raw entries; changelog reading and any '
                    'operation relying on changelog data are slower '
                    'than they could be')

    upgrademessage = _('changelog storage will be reformated to '
                       'store raw entries; changelog reading will be '
                       'faster; changelog size may be reduced')

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 changed changelogs to not use delta chains. Search for
        # changelogs with deltas.
        cl = repo.changelog
        chainbase = cl.chainbase
        # A revision stored as a full snapshot is its own chain base; any
        # revision whose base differs is stored as a delta.
        return all(rev == chainbase(rev) for rev in cl)

    @staticmethod
    def fromconfig(repo):
        # Current Mercurial always writes full changelog entries, so the
        # "configured" value is unconditionally True.
        return True
314
314
@registerformatvariant
class compressionengine(formatvariant):
    # Not requirement-based: the value is an engine name, not a boolean.
    name = 'compression'
    default = 'zlib'

    description = _('Compresion algorithm used to compress data. '
                    'Some engine are faster than other')

    upgrademessage = _('revlog content will be recompressed with the new '
                       'algorithm.')

    @classmethod
    def fromrepo(cls, repo):
        # A non-default engine is recorded as an 'exp-compression-<name>'
        # requirement; absence of such a requirement means zlib.
        for req in repo.requirements:
            if req.startswith('exp-compression-'):
                return req.split('-', 2)[2]
        return 'zlib'

    @classmethod
    def fromconfig(cls, repo):
        return repo.ui.config('experimental', 'format.compression')
336
336
def finddeficiencies(repo):
    """returns a list of deficiencies that the repo suffer from"""
    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.
    return [variant for variant in allformatvariant
            if not variant.fromrepo(repo)]
350
350
# search without '-' to support older form on newer client.
#
# We don't enforce backward compatibility for debug command so this
# might eventually be dropped. However, having to use two different
# forms in script when comparing result is annoying enough to add
# backward compatibility for a while.
legacy_opts_map = {
    'redeltaparent': 're-delta-parent',
    'redeltamultibase': 're-delta-multibase',
    'redeltaall': 're-delta-all',
    'redeltafulladd': 're-delta-fulladd',
}
363
363
def findoptimizations(repo):
    """Determine optimisation that could be used during upgrade.

    Returns a list of ``improvement`` instances of type ``optimisation``.
    """
    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.
    optimizations = []

    optimizations.append(improvement(
        name='re-delta-parent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    optimizations.append(improvement(
        name='re-delta-multibase',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revision and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the amount '
                      'of files in the repository; this slow down should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))

    optimizations.append(improvement(
        name='re-delta-all',
        type=optimisation,
        description=_('deltas within internal storage will always be '
                      'recalculated without reusing prior deltas; this will '
                      'likely make execution run several times slower; this '
                      'optimization is typically not needed'),
        upgrademessage=_('deltas within internal storage will be fully '
                         'recomputed; this will likely drastically slow down '
                         'execution time')))

    optimizations.append(improvement(
        name='re-delta-fulladd',
        type=optimisation,
        description=_('every revision will be re-added as if it was new '
                      'content. It will go through the full storage '
                      'mechanism giving extensions a chance to process it '
                      '(eg. lfs). This is similar to "re-delta-all" but even '
                      'slower since more logic is involved.'),
        upgrademessage=_('each revision will be added as new content to the '
                         'internal storage; this will likely drastically slow '
                         'down execution time, but some extensions might need '
                         'it')))

    return optimizations
425
425
def determineactions(repo, deficiencies, sourcereqs, destreqs):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``finddeficiencies`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    knownreqs = supporteddestrequirements(repo)

    # Keep a deficiency unless it names a known requirement that the
    # destination will not carry - acting on it would be pointless then.
    newactions = [d for d in deficiencies
                  if d.name not in knownreqs or d.name in destreqs]

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
457
457
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith('00manifest.i'):
        tree = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=tree)
    # Everything else lives under "data/<tracked path>.i"; strip the
    # "data/" prefix and the ".i" suffix to recover the tracked path.
    return filelog.filelog(repo.svfs, path[5:-2])
471
471
472 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents):
472 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents):
473 """Copy revlogs between 2 repos."""
473 """Copy revlogs between 2 repos."""
474 revcount = 0
474 revcount = 0
475 srcsize = 0
475 srcsize = 0
476 srcrawsize = 0
476 srcrawsize = 0
477 dstsize = 0
477 dstsize = 0
478 fcount = 0
478 fcount = 0
479 frevcount = 0
479 frevcount = 0
480 fsrcsize = 0
480 fsrcsize = 0
481 frawsize = 0
481 frawsize = 0
482 fdstsize = 0
482 fdstsize = 0
483 mcount = 0
483 mcount = 0
484 mrevcount = 0
484 mrevcount = 0
485 msrcsize = 0
485 msrcsize = 0
486 mrawsize = 0
486 mrawsize = 0
487 mdstsize = 0
487 mdstsize = 0
488 crevcount = 0
488 crevcount = 0
489 csrcsize = 0
489 csrcsize = 0
490 crawsize = 0
490 crawsize = 0
491 cdstsize = 0
491 cdstsize = 0
492
492
493 # Perform a pass to collect metadata. This validates we can open all
493 # Perform a pass to collect metadata. This validates we can open all
494 # source files and allows a unified progress bar to be displayed.
494 # source files and allows a unified progress bar to be displayed.
495 for unencoded, encoded, size in srcrepo.store.walk():
495 for unencoded, encoded, size in srcrepo.store.walk():
496 if unencoded.endswith('.d'):
496 if unencoded.endswith('.d'):
497 continue
497 continue
498
498
499 rl = _revlogfrompath(srcrepo, unencoded)
499 rl = _revlogfrompath(srcrepo, unencoded)
500
500
501 info = rl.storageinfo(exclusivefiles=True, revisionscount=True,
501 info = rl.storageinfo(exclusivefiles=True, revisionscount=True,
502 trackedsize=True, storedsize=True)
502 trackedsize=True, storedsize=True)
503
503
504 revcount += info['revisionscount'] or 0
504 revcount += info['revisionscount'] or 0
505 datasize = info['storedsize'] or 0
505 datasize = info['storedsize'] or 0
506 rawsize = info['trackedsize'] or 0
506 rawsize = info['trackedsize'] or 0
507
507
508 srcsize += datasize
508 srcsize += datasize
509 srcrawsize += rawsize
509 srcrawsize += rawsize
510
510
511 # This is for the separate progress bars.
511 # This is for the separate progress bars.
512 if isinstance(rl, changelog.changelog):
512 if isinstance(rl, changelog.changelog):
513 crevcount += len(rl)
513 crevcount += len(rl)
514 csrcsize += datasize
514 csrcsize += datasize
515 crawsize += rawsize
515 crawsize += rawsize
516 elif isinstance(rl, manifest.manifestrevlog):
516 elif isinstance(rl, manifest.manifestrevlog):
517 mcount += 1
517 mcount += 1
518 mrevcount += len(rl)
518 mrevcount += len(rl)
519 msrcsize += datasize
519 msrcsize += datasize
520 mrawsize += rawsize
520 mrawsize += rawsize
521 elif isinstance(rl, filelog.filelog):
521 elif isinstance(rl, filelog.filelog):
522 fcount += 1
522 fcount += 1
523 frevcount += len(rl)
523 frevcount += len(rl)
524 fsrcsize += datasize
524 fsrcsize += datasize
525 frawsize += rawsize
525 frawsize += rawsize
526 else:
526 else:
527 error.ProgrammingError('unknown revlog type')
527 error.ProgrammingError('unknown revlog type')
528
528
529 if not revcount:
529 if not revcount:
530 return
530 return
531
531
532 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
532 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
533 '%d in changelog)\n') %
533 '%d in changelog)\n') %
534 (revcount, frevcount, mrevcount, crevcount))
534 (revcount, frevcount, mrevcount, crevcount))
535 ui.write(_('migrating %s in store; %s tracked data\n') % (
535 ui.write(_('migrating %s in store; %s tracked data\n') % (
536 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
536 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
537
537
538 # Used to keep track of progress.
538 # Used to keep track of progress.
539 progress = None
539 progress = None
540 def oncopiedrevision(rl, rev, node):
540 def oncopiedrevision(rl, rev, node):
541 progress.increment()
541 progress.increment()
542
542
543 # Do the actual copying.
543 # Do the actual copying.
544 # FUTURE this operation can be farmed off to worker processes.
544 # FUTURE this operation can be farmed off to worker processes.
545 seen = set()
545 seen = set()
546 for unencoded, encoded, size in srcrepo.store.walk():
546 for unencoded, encoded, size in srcrepo.store.walk():
547 if unencoded.endswith('.d'):
547 if unencoded.endswith('.d'):
548 continue
548 continue
549
549
550 oldrl = _revlogfrompath(srcrepo, unencoded)
550 oldrl = _revlogfrompath(srcrepo, unencoded)
551 newrl = _revlogfrompath(dstrepo, unencoded)
551 newrl = _revlogfrompath(dstrepo, unencoded)
552
552
553 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
553 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
554 ui.write(_('finished migrating %d manifest revisions across %d '
554 ui.write(_('finished migrating %d manifest revisions across %d '
555 'manifests; change in size: %s\n') %
555 'manifests; change in size: %s\n') %
556 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
556 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
557
557
558 ui.write(_('migrating changelog containing %d revisions '
558 ui.write(_('migrating changelog containing %d revisions '
559 '(%s in store; %s tracked data)\n') %
559 '(%s in store; %s tracked data)\n') %
560 (crevcount, util.bytecount(csrcsize),
560 (crevcount, util.bytecount(csrcsize),
561 util.bytecount(crawsize)))
561 util.bytecount(crawsize)))
562 seen.add('c')
562 seen.add('c')
563 progress = srcrepo.ui.makeprogress(_('changelog revisions'),
563 progress = srcrepo.ui.makeprogress(_('changelog revisions'),
564 total=crevcount)
564 total=crevcount)
565 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
565 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
566 ui.write(_('finished migrating %d filelog revisions across %d '
566 ui.write(_('finished migrating %d filelog revisions across %d '
567 'filelogs; change in size: %s\n') %
567 'filelogs; change in size: %s\n') %
568 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
568 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
569
569
570 ui.write(_('migrating %d manifests containing %d revisions '
570 ui.write(_('migrating %d manifests containing %d revisions '
571 '(%s in store; %s tracked data)\n') %
571 '(%s in store; %s tracked data)\n') %
572 (mcount, mrevcount, util.bytecount(msrcsize),
572 (mcount, mrevcount, util.bytecount(msrcsize),
573 util.bytecount(mrawsize)))
573 util.bytecount(mrawsize)))
574 seen.add('m')
574 seen.add('m')
575 if progress:
575 if progress:
576 progress.complete()
576 progress.complete()
577 progress = srcrepo.ui.makeprogress(_('manifest revisions'),
577 progress = srcrepo.ui.makeprogress(_('manifest revisions'),
578 total=mrevcount)
578 total=mrevcount)
579 elif 'f' not in seen:
579 elif 'f' not in seen:
580 ui.write(_('migrating %d filelogs containing %d revisions '
580 ui.write(_('migrating %d filelogs containing %d revisions '
581 '(%s in store; %s tracked data)\n') %
581 '(%s in store; %s tracked data)\n') %
582 (fcount, frevcount, util.bytecount(fsrcsize),
582 (fcount, frevcount, util.bytecount(fsrcsize),
583 util.bytecount(frawsize)))
583 util.bytecount(frawsize)))
584 seen.add('f')
584 seen.add('f')
585 if progress:
585 if progress:
586 progress.complete()
586 progress.complete()
587 progress = srcrepo.ui.makeprogress(_('file revisions'),
587 progress = srcrepo.ui.makeprogress(_('file revisions'),
588 total=frevcount)
588 total=frevcount)
589
589
590
590
591 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
591 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
592 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
592 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
593 deltareuse=deltareuse,
593 deltareuse=deltareuse,
594 forcedeltabothparents=forcedeltabothparents)
594 forcedeltabothparents=forcedeltabothparents)
595
595
596 info = newrl.storageinfo(storedsize=True)
596 info = newrl.storageinfo(storedsize=True)
597 datasize = info['storedsize'] or 0
597 datasize = info['storedsize'] or 0
598
598
599 dstsize += datasize
599 dstsize += datasize
600
600
601 if isinstance(newrl, changelog.changelog):
601 if isinstance(newrl, changelog.changelog):
602 cdstsize += datasize
602 cdstsize += datasize
603 elif isinstance(newrl, manifest.manifestrevlog):
603 elif isinstance(newrl, manifest.manifestrevlog):
604 mdstsize += datasize
604 mdstsize += datasize
605 else:
605 else:
606 fdstsize += datasize
606 fdstsize += datasize
607
607
608 progress.complete()
608 progress.complete()
609
609
610 ui.write(_('finished migrating %d changelog revisions; change in size: '
610 ui.write(_('finished migrating %d changelog revisions; change in size: '
611 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
611 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
612
612
613 ui.write(_('finished migrating %d total revisions; total change in store '
613 ui.write(_('finished migrating %d total revisions; total change in store '
614 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
614 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
615
615
616 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
616 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
617 """Determine whether to copy a store file during upgrade.
617 """Determine whether to copy a store file during upgrade.
618
618
619 This function is called when migrating store files from ``srcrepo`` to
619 This function is called when migrating store files from ``srcrepo`` to
620 ``dstrepo`` as part of upgrading a repository.
620 ``dstrepo`` as part of upgrading a repository.
621
621
622 Args:
622 Args:
623 srcrepo: repo we are copying from
623 srcrepo: repo we are copying from
624 dstrepo: repo we are copying to
624 dstrepo: repo we are copying to
625 requirements: set of requirements for ``dstrepo``
625 requirements: set of requirements for ``dstrepo``
626 path: store file being examined
626 path: store file being examined
627 mode: the ``ST_MODE`` file type of ``path``
627 mode: the ``ST_MODE`` file type of ``path``
628 st: ``stat`` data structure for ``path``
628 st: ``stat`` data structure for ``path``
629
629
630 Function should return ``True`` if the file is to be copied.
630 Function should return ``True`` if the file is to be copied.
631 """
631 """
632 # Skip revlogs.
632 # Skip revlogs.
633 if path.endswith(('.i', '.d')):
633 if path.endswith(('.i', '.d')):
634 return False
634 return False
635 # Skip transaction related files.
635 # Skip transaction related files.
636 if path.startswith('undo'):
636 if path.startswith('undo'):
637 return False
637 return False
638 # Only copy regular files.
638 # Only copy regular files.
639 if mode != stat.S_IFREG:
639 if mode != stat.S_IFREG:
640 return False
640 return False
641 # Skip other skipped files.
641 # Skip other skipped files.
642 if path in ('lock', 'fncache'):
642 if path in ('lock', 'fncache'):
643 return False
643 return False
644
644
645 return True
645 return True
646
646
647 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
647 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
648 """Hook point for extensions to perform additional actions during upgrade.
648 """Hook point for extensions to perform additional actions during upgrade.
649
649
650 This function is called after revlogs and store files have been copied but
650 This function is called after revlogs and store files have been copied but
651 before the new store is swapped into the original location.
651 before the new store is swapped into the original location.
652 """
652 """
653
653
654 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
654 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
655 """Do the low-level work of upgrading a repository.
655 """Do the low-level work of upgrading a repository.
656
656
657 The upgrade is effectively performed as a copy between a source
657 The upgrade is effectively performed as a copy between a source
658 repository and a temporary destination repository.
658 repository and a temporary destination repository.
659
659
660 The source repository is unmodified for as long as possible so the
660 The source repository is unmodified for as long as possible so the
661 upgrade can abort at any time without causing loss of service for
661 upgrade can abort at any time without causing loss of service for
662 readers and without corrupting the source repository.
662 readers and without corrupting the source repository.
663 """
663 """
664 assert srcrepo.currentwlock()
664 assert srcrepo.currentwlock()
665 assert dstrepo.currentwlock()
665 assert dstrepo.currentwlock()
666
666
667 ui.write(_('(it is safe to interrupt this process any time before '
667 ui.write(_('(it is safe to interrupt this process any time before '
668 'data migration completes)\n'))
668 'data migration completes)\n'))
669
669
670 if 're-delta-all' in actions:
670 if 're-delta-all' in actions:
671 deltareuse = revlog.revlog.DELTAREUSENEVER
671 deltareuse = revlog.revlog.DELTAREUSENEVER
672 elif 're-delta-parent' in actions:
672 elif 're-delta-parent' in actions:
673 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
673 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
674 elif 're-delta-multibase' in actions:
674 elif 're-delta-multibase' in actions:
675 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
675 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
676 elif 're-delta-fulladd' in actions:
676 elif 're-delta-fulladd' in actions:
677 deltareuse = revlog.revlog.DELTAREUSEFULLADD
677 deltareuse = revlog.revlog.DELTAREUSEFULLADD
678 else:
678 else:
679 deltareuse = revlog.revlog.DELTAREUSEALWAYS
679 deltareuse = revlog.revlog.DELTAREUSEALWAYS
680
680
681 with dstrepo.transaction('upgrade') as tr:
681 with dstrepo.transaction('upgrade') as tr:
682 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
682 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
683 're-delta-multibase' in actions)
683 're-delta-multibase' in actions)
684
684
685 # Now copy other files in the store directory.
685 # Now copy other files in the store directory.
686 # The sorted() makes execution deterministic.
686 # The sorted() makes execution deterministic.
687 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
687 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
688 if not _filterstorefile(srcrepo, dstrepo, requirements,
688 if not _filterstorefile(srcrepo, dstrepo, requirements,
689 p, kind, st):
689 p, kind, st):
690 continue
690 continue
691
691
692 srcrepo.ui.write(_('copying %s\n') % p)
692 srcrepo.ui.write(_('copying %s\n') % p)
693 src = srcrepo.store.rawvfs.join(p)
693 src = srcrepo.store.rawvfs.join(p)
694 dst = dstrepo.store.rawvfs.join(p)
694 dst = dstrepo.store.rawvfs.join(p)
695 util.copyfile(src, dst, copystat=True)
695 util.copyfile(src, dst, copystat=True)
696
696
697 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
697 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
698
698
699 ui.write(_('data fully migrated to temporary repository\n'))
699 ui.write(_('data fully migrated to temporary repository\n'))
700
700
701 backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
701 backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
702 backupvfs = vfsmod.vfs(backuppath)
702 backupvfs = vfsmod.vfs(backuppath)
703
703
704 # Make a backup of requires file first, as it is the first to be modified.
704 # Make a backup of requires file first, as it is the first to be modified.
705 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
705 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
706
706
707 # We install an arbitrary requirement that clients must not support
707 # We install an arbitrary requirement that clients must not support
708 # as a mechanism to lock out new clients during the data swap. This is
708 # as a mechanism to lock out new clients during the data swap. This is
709 # better than allowing a client to continue while the repository is in
709 # better than allowing a client to continue while the repository is in
710 # an inconsistent state.
710 # an inconsistent state.
711 ui.write(_('marking source repository as being upgraded; clients will be '
711 ui.write(_('marking source repository as being upgraded; clients will be '
712 'unable to read from repository\n'))
712 'unable to read from repository\n'))
713 scmutil.writerequires(srcrepo.vfs,
713 scmutil.writerequires(srcrepo.vfs,
714 srcrepo.requirements | {'upgradeinprogress'})
714 srcrepo.requirements | {'upgradeinprogress'})
715
715
716 ui.write(_('starting in-place swap of repository data\n'))
716 ui.write(_('starting in-place swap of repository data\n'))
717 ui.write(_('replaced files will be backed up at %s\n') %
717 ui.write(_('replaced files will be backed up at %s\n') %
718 backuppath)
718 backuppath)
719
719
720 # Now swap in the new store directory. Doing it as a rename should make
720 # Now swap in the new store directory. Doing it as a rename should make
721 # the operation nearly instantaneous and atomic (at least in well-behaved
721 # the operation nearly instantaneous and atomic (at least in well-behaved
722 # environments).
722 # environments).
723 ui.write(_('replacing store...\n'))
723 ui.write(_('replacing store...\n'))
724 tstart = util.timer()
724 tstart = util.timer()
725 util.rename(srcrepo.spath, backupvfs.join('store'))
725 util.rename(srcrepo.spath, backupvfs.join('store'))
726 util.rename(dstrepo.spath, srcrepo.spath)
726 util.rename(dstrepo.spath, srcrepo.spath)
727 elapsed = util.timer() - tstart
727 elapsed = util.timer() - tstart
728 ui.write(_('store replacement complete; repository was inconsistent for '
728 ui.write(_('store replacement complete; repository was inconsistent for '
729 '%0.1fs\n') % elapsed)
729 '%0.1fs\n') % elapsed)
730
730
731 # We first write the requirements file. Any new requirements will lock
731 # We first write the requirements file. Any new requirements will lock
732 # out legacy clients.
732 # out legacy clients.
733 ui.write(_('finalizing requirements file and making repository readable '
733 ui.write(_('finalizing requirements file and making repository readable '
734 'again\n'))
734 'again\n'))
735 scmutil.writerequires(srcrepo.vfs, requirements)
735 scmutil.writerequires(srcrepo.vfs, requirements)
736
736
737 # The lock file from the old store won't be removed because nothing has a
737 # The lock file from the old store won't be removed because nothing has a
738 # reference to its new location. So clean it up manually. Alternatively, we
738 # reference to its new location. So clean it up manually. Alternatively, we
739 # could update srcrepo.svfs and other variables to point to the new
739 # could update srcrepo.svfs and other variables to point to the new
740 # location. This is simpler.
740 # location. This is simpler.
741 backupvfs.unlink('store/lock')
741 backupvfs.unlink('store/lock')
742
742
743 return backuppath
743 return backuppath
744
744
745 def upgraderepo(ui, repo, run=False, optimize=None):
745 def upgraderepo(ui, repo, run=False, optimize=None, backup=True):
746 """Upgrade a repository in place."""
746 """Upgrade a repository in place."""
747 if optimize is None:
747 if optimize is None:
748 optimize = []
748 optimize = []
749 optimize = set(legacy_opts_map.get(o, o) for o in optimize)
749 optimize = set(legacy_opts_map.get(o, o) for o in optimize)
750 repo = repo.unfiltered()
750 repo = repo.unfiltered()
751
751
752 # Ensure the repository can be upgraded.
752 # Ensure the repository can be upgraded.
753 missingreqs = requiredsourcerequirements(repo) - repo.requirements
753 missingreqs = requiredsourcerequirements(repo) - repo.requirements
754 if missingreqs:
754 if missingreqs:
755 raise error.Abort(_('cannot upgrade repository; requirement '
755 raise error.Abort(_('cannot upgrade repository; requirement '
756 'missing: %s') % _(', ').join(sorted(missingreqs)))
756 'missing: %s') % _(', ').join(sorted(missingreqs)))
757
757
758 blockedreqs = blocksourcerequirements(repo) & repo.requirements
758 blockedreqs = blocksourcerequirements(repo) & repo.requirements
759 if blockedreqs:
759 if blockedreqs:
760 raise error.Abort(_('cannot upgrade repository; unsupported source '
760 raise error.Abort(_('cannot upgrade repository; unsupported source '
761 'requirement: %s') %
761 'requirement: %s') %
762 _(', ').join(sorted(blockedreqs)))
762 _(', ').join(sorted(blockedreqs)))
763
763
764 # FUTURE there is potentially a need to control the wanted requirements via
764 # FUTURE there is potentially a need to control the wanted requirements via
765 # command arguments or via an extension hook point.
765 # command arguments or via an extension hook point.
766 newreqs = localrepo.newreporequirements(
766 newreqs = localrepo.newreporequirements(
767 repo.ui, localrepo.defaultcreateopts(repo.ui))
767 repo.ui, localrepo.defaultcreateopts(repo.ui))
768 newreqs.update(preservedrequirements(repo))
768 newreqs.update(preservedrequirements(repo))
769
769
770 noremovereqs = (repo.requirements - newreqs -
770 noremovereqs = (repo.requirements - newreqs -
771 supportremovedrequirements(repo))
771 supportremovedrequirements(repo))
772 if noremovereqs:
772 if noremovereqs:
773 raise error.Abort(_('cannot upgrade repository; requirement would be '
773 raise error.Abort(_('cannot upgrade repository; requirement would be '
774 'removed: %s') % _(', ').join(sorted(noremovereqs)))
774 'removed: %s') % _(', ').join(sorted(noremovereqs)))
775
775
776 noaddreqs = (newreqs - repo.requirements -
776 noaddreqs = (newreqs - repo.requirements -
777 allowednewrequirements(repo))
777 allowednewrequirements(repo))
778 if noaddreqs:
778 if noaddreqs:
779 raise error.Abort(_('cannot upgrade repository; do not support adding '
779 raise error.Abort(_('cannot upgrade repository; do not support adding '
780 'requirement: %s') %
780 'requirement: %s') %
781 _(', ').join(sorted(noaddreqs)))
781 _(', ').join(sorted(noaddreqs)))
782
782
783 unsupportedreqs = newreqs - supporteddestrequirements(repo)
783 unsupportedreqs = newreqs - supporteddestrequirements(repo)
784 if unsupportedreqs:
784 if unsupportedreqs:
785 raise error.Abort(_('cannot upgrade repository; do not support '
785 raise error.Abort(_('cannot upgrade repository; do not support '
786 'destination requirement: %s') %
786 'destination requirement: %s') %
787 _(', ').join(sorted(unsupportedreqs)))
787 _(', ').join(sorted(unsupportedreqs)))
788
788
789 # Find and validate all improvements that can be made.
789 # Find and validate all improvements that can be made.
790 alloptimizations = findoptimizations(repo)
790 alloptimizations = findoptimizations(repo)
791
791
792 # Apply and Validate arguments.
792 # Apply and Validate arguments.
793 optimizations = []
793 optimizations = []
794 for o in alloptimizations:
794 for o in alloptimizations:
795 if o.name in optimize:
795 if o.name in optimize:
796 optimizations.append(o)
796 optimizations.append(o)
797 optimize.discard(o.name)
797 optimize.discard(o.name)
798
798
799 if optimize: # anything left is unknown
799 if optimize: # anything left is unknown
800 raise error.Abort(_('unknown optimization action requested: %s') %
800 raise error.Abort(_('unknown optimization action requested: %s') %
801 ', '.join(sorted(optimize)),
801 ', '.join(sorted(optimize)),
802 hint=_('run without arguments to see valid '
802 hint=_('run without arguments to see valid '
803 'optimizations'))
803 'optimizations'))
804
804
805 deficiencies = finddeficiencies(repo)
805 deficiencies = finddeficiencies(repo)
806 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
806 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
807 actions.extend(o for o in sorted(optimizations)
807 actions.extend(o for o in sorted(optimizations)
808 # determineactions could have added optimisation
808 # determineactions could have added optimisation
809 if o not in actions)
809 if o not in actions)
810
810
811 def printrequirements():
811 def printrequirements():
812 ui.write(_('requirements\n'))
812 ui.write(_('requirements\n'))
813 ui.write(_(' preserved: %s\n') %
813 ui.write(_(' preserved: %s\n') %
814 _(', ').join(sorted(newreqs & repo.requirements)))
814 _(', ').join(sorted(newreqs & repo.requirements)))
815
815
816 if repo.requirements - newreqs:
816 if repo.requirements - newreqs:
817 ui.write(_(' removed: %s\n') %
817 ui.write(_(' removed: %s\n') %
818 _(', ').join(sorted(repo.requirements - newreqs)))
818 _(', ').join(sorted(repo.requirements - newreqs)))
819
819
820 if newreqs - repo.requirements:
820 if newreqs - repo.requirements:
821 ui.write(_(' added: %s\n') %
821 ui.write(_(' added: %s\n') %
822 _(', ').join(sorted(newreqs - repo.requirements)))
822 _(', ').join(sorted(newreqs - repo.requirements)))
823
823
824 ui.write('\n')
824 ui.write('\n')
825
825
826 def printupgradeactions():
826 def printupgradeactions():
827 for a in actions:
827 for a in actions:
828 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
828 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
829
829
830 if not run:
830 if not run:
831 fromconfig = []
831 fromconfig = []
832 onlydefault = []
832 onlydefault = []
833
833
834 for d in deficiencies:
834 for d in deficiencies:
835 if d.fromconfig(repo):
835 if d.fromconfig(repo):
836 fromconfig.append(d)
836 fromconfig.append(d)
837 elif d.default:
837 elif d.default:
838 onlydefault.append(d)
838 onlydefault.append(d)
839
839
840 if fromconfig or onlydefault:
840 if fromconfig or onlydefault:
841
841
842 if fromconfig:
842 if fromconfig:
843 ui.write(_('repository lacks features recommended by '
843 ui.write(_('repository lacks features recommended by '
844 'current config options:\n\n'))
844 'current config options:\n\n'))
845 for i in fromconfig:
845 for i in fromconfig:
846 ui.write('%s\n %s\n\n' % (i.name, i.description))
846 ui.write('%s\n %s\n\n' % (i.name, i.description))
847
847
848 if onlydefault:
848 if onlydefault:
849 ui.write(_('repository lacks features used by the default '
849 ui.write(_('repository lacks features used by the default '
850 'config options:\n\n'))
850 'config options:\n\n'))
851 for i in onlydefault:
851 for i in onlydefault:
852 ui.write('%s\n %s\n\n' % (i.name, i.description))
852 ui.write('%s\n %s\n\n' % (i.name, i.description))
853
853
854 ui.write('\n')
854 ui.write('\n')
855 else:
855 else:
856 ui.write(_('(no feature deficiencies found in existing '
856 ui.write(_('(no feature deficiencies found in existing '
857 'repository)\n'))
857 'repository)\n'))
858
858
859 ui.write(_('performing an upgrade with "--run" will make the following '
859 ui.write(_('performing an upgrade with "--run" will make the following '
860 'changes:\n\n'))
860 'changes:\n\n'))
861
861
862 printrequirements()
862 printrequirements()
863 printupgradeactions()
863 printupgradeactions()
864
864
865 unusedoptimize = [i for i in alloptimizations if i not in actions]
865 unusedoptimize = [i for i in alloptimizations if i not in actions]
866
866
867 if unusedoptimize:
867 if unusedoptimize:
868 ui.write(_('additional optimizations are available by specifying '
868 ui.write(_('additional optimizations are available by specifying '
869 '"--optimize <name>":\n\n'))
869 '"--optimize <name>":\n\n'))
870 for i in unusedoptimize:
870 for i in unusedoptimize:
871 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
871 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
872 return
872 return
873
873
874 # Else we're in the run=true case.
874 # Else we're in the run=true case.
875 ui.write(_('upgrade will perform the following actions:\n\n'))
875 ui.write(_('upgrade will perform the following actions:\n\n'))
876 printrequirements()
876 printrequirements()
877 printupgradeactions()
877 printupgradeactions()
878
878
879 upgradeactions = [a.name for a in actions]
879 upgradeactions = [a.name for a in actions]
880
880
881 ui.write(_('beginning upgrade...\n'))
881 ui.write(_('beginning upgrade...\n'))
882 with repo.wlock(), repo.lock():
882 with repo.wlock(), repo.lock():
883 ui.write(_('repository locked and read-only\n'))
883 ui.write(_('repository locked and read-only\n'))
884 # Our strategy for upgrading the repository is to create a new,
884 # Our strategy for upgrading the repository is to create a new,
885 # temporary repository, write data to it, then do a swap of the
885 # temporary repository, write data to it, then do a swap of the
886 # data. There are less heavyweight ways to do this, but it is easier
886 # data. There are less heavyweight ways to do this, but it is easier
887 # to create a new repo object than to instantiate all the components
887 # to create a new repo object than to instantiate all the components
888 # (like the store) separately.
888 # (like the store) separately.
889 tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
889 tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
890 backuppath = None
890 backuppath = None
891 try:
891 try:
892 ui.write(_('creating temporary repository to stage migrated '
892 ui.write(_('creating temporary repository to stage migrated '
893 'data: %s\n') % tmppath)
893 'data: %s\n') % tmppath)
894
894
895 # clone ui without using ui.copy because repo.ui is protected
895 # clone ui without using ui.copy because repo.ui is protected
896 repoui = repo.ui.__class__(repo.ui)
896 repoui = repo.ui.__class__(repo.ui)
897 dstrepo = hg.repository(repoui, path=tmppath, create=True)
897 dstrepo = hg.repository(repoui, path=tmppath, create=True)
898
898
899 with dstrepo.wlock(), dstrepo.lock():
899 with dstrepo.wlock(), dstrepo.lock():
900 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
900 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
901 upgradeactions)
901 upgradeactions)
902 if not (backup or backuppath is None):
903 ui.write(_('removing old repository content%s\n') % backuppath)
904 repo.vfs.rmtree(backuppath, forcibly=True)
905 backuppath = None
902
906
903 finally:
907 finally:
904 ui.write(_('removing temporary repository %s\n') % tmppath)
908 ui.write(_('removing temporary repository %s\n') % tmppath)
905 repo.vfs.rmtree(tmppath, forcibly=True)
909 repo.vfs.rmtree(tmppath, forcibly=True)
906
910
907 if backuppath:
911 if backuppath:
908 ui.warn(_('copy of old repository backed up at %s\n') %
912 ui.warn(_('copy of old repository backed up at %s\n') %
909 backuppath)
913 backuppath)
910 ui.warn(_('the old repository will not be deleted; remove '
914 ui.warn(_('the old repository will not be deleted; remove '
911 'it to free up disk space once the upgraded '
915 'it to free up disk space once the upgraded '
912 'repository is verified\n'))
916 'repository is verified\n'))
@@ -1,408 +1,408 b''
1 Show all commands except debug commands
1 Show all commands except debug commands
2 $ hg debugcomplete
2 $ hg debugcomplete
3 add
3 add
4 addremove
4 addremove
5 annotate
5 annotate
6 archive
6 archive
7 backout
7 backout
8 bisect
8 bisect
9 bookmarks
9 bookmarks
10 branch
10 branch
11 branches
11 branches
12 bundle
12 bundle
13 cat
13 cat
14 clone
14 clone
15 commit
15 commit
16 config
16 config
17 copy
17 copy
18 diff
18 diff
19 export
19 export
20 files
20 files
21 forget
21 forget
22 graft
22 graft
23 grep
23 grep
24 heads
24 heads
25 help
25 help
26 identify
26 identify
27 import
27 import
28 incoming
28 incoming
29 init
29 init
30 locate
30 locate
31 log
31 log
32 manifest
32 manifest
33 merge
33 merge
34 outgoing
34 outgoing
35 parents
35 parents
36 paths
36 paths
37 phase
37 phase
38 pull
38 pull
39 push
39 push
40 recover
40 recover
41 remove
41 remove
42 rename
42 rename
43 resolve
43 resolve
44 revert
44 revert
45 rollback
45 rollback
46 root
46 root
47 serve
47 serve
48 status
48 status
49 summary
49 summary
50 tag
50 tag
51 tags
51 tags
52 tip
52 tip
53 unbundle
53 unbundle
54 update
54 update
55 verify
55 verify
56 version
56 version
57
57
58 Show all commands that start with "a"
58 Show all commands that start with "a"
59 $ hg debugcomplete a
59 $ hg debugcomplete a
60 add
60 add
61 addremove
61 addremove
62 annotate
62 annotate
63 archive
63 archive
64
64
65 Do not show debug commands if there are other candidates
65 Do not show debug commands if there are other candidates
66 $ hg debugcomplete d
66 $ hg debugcomplete d
67 diff
67 diff
68
68
69 Show debug commands if there are no other candidates
69 Show debug commands if there are no other candidates
70 $ hg debugcomplete debug
70 $ hg debugcomplete debug
71 debugancestor
71 debugancestor
72 debugapplystreamclonebundle
72 debugapplystreamclonebundle
73 debugbuilddag
73 debugbuilddag
74 debugbundle
74 debugbundle
75 debugcapabilities
75 debugcapabilities
76 debugcheckstate
76 debugcheckstate
77 debugcolor
77 debugcolor
78 debugcommands
78 debugcommands
79 debugcomplete
79 debugcomplete
80 debugconfig
80 debugconfig
81 debugcreatestreamclonebundle
81 debugcreatestreamclonebundle
82 debugdag
82 debugdag
83 debugdata
83 debugdata
84 debugdate
84 debugdate
85 debugdeltachain
85 debugdeltachain
86 debugdirstate
86 debugdirstate
87 debugdiscovery
87 debugdiscovery
88 debugdownload
88 debugdownload
89 debugextensions
89 debugextensions
90 debugfileset
90 debugfileset
91 debugformat
91 debugformat
92 debugfsinfo
92 debugfsinfo
93 debuggetbundle
93 debuggetbundle
94 debugignore
94 debugignore
95 debugindex
95 debugindex
96 debugindexdot
96 debugindexdot
97 debugindexstats
97 debugindexstats
98 debuginstall
98 debuginstall
99 debugknown
99 debugknown
100 debuglabelcomplete
100 debuglabelcomplete
101 debuglocks
101 debuglocks
102 debugmanifestfulltextcache
102 debugmanifestfulltextcache
103 debugmergestate
103 debugmergestate
104 debugnamecomplete
104 debugnamecomplete
105 debugobsolete
105 debugobsolete
106 debugpathcomplete
106 debugpathcomplete
107 debugpeer
107 debugpeer
108 debugpickmergetool
108 debugpickmergetool
109 debugpushkey
109 debugpushkey
110 debugpvec
110 debugpvec
111 debugrebuilddirstate
111 debugrebuilddirstate
112 debugrebuildfncache
112 debugrebuildfncache
113 debugrename
113 debugrename
114 debugrevlog
114 debugrevlog
115 debugrevlogindex
115 debugrevlogindex
116 debugrevspec
116 debugrevspec
117 debugserve
117 debugserve
118 debugsetparents
118 debugsetparents
119 debugssl
119 debugssl
120 debugsub
120 debugsub
121 debugsuccessorssets
121 debugsuccessorssets
122 debugtemplate
122 debugtemplate
123 debuguigetpass
123 debuguigetpass
124 debuguiprompt
124 debuguiprompt
125 debugupdatecaches
125 debugupdatecaches
126 debugupgraderepo
126 debugupgraderepo
127 debugwalk
127 debugwalk
128 debugwhyunstable
128 debugwhyunstable
129 debugwireargs
129 debugwireargs
130 debugwireproto
130 debugwireproto
131
131
132 Do not show the alias of a debug command if there are other candidates
132 Do not show the alias of a debug command if there are other candidates
133 (this should hide rawcommit)
133 (this should hide rawcommit)
134 $ hg debugcomplete r
134 $ hg debugcomplete r
135 recover
135 recover
136 remove
136 remove
137 rename
137 rename
138 resolve
138 resolve
139 revert
139 revert
140 rollback
140 rollback
141 root
141 root
142 Show the alias of a debug command if there are no other candidates
142 Show the alias of a debug command if there are no other candidates
143 $ hg debugcomplete rawc
143 $ hg debugcomplete rawc
144
144
145
145
146 Show the global options
146 Show the global options
147 $ hg debugcomplete --options | sort
147 $ hg debugcomplete --options | sort
148 --color
148 --color
149 --config
149 --config
150 --cwd
150 --cwd
151 --debug
151 --debug
152 --debugger
152 --debugger
153 --encoding
153 --encoding
154 --encodingmode
154 --encodingmode
155 --help
155 --help
156 --hidden
156 --hidden
157 --noninteractive
157 --noninteractive
158 --pager
158 --pager
159 --profile
159 --profile
160 --quiet
160 --quiet
161 --repository
161 --repository
162 --time
162 --time
163 --traceback
163 --traceback
164 --verbose
164 --verbose
165 --version
165 --version
166 -R
166 -R
167 -h
167 -h
168 -q
168 -q
169 -v
169 -v
170 -y
170 -y
171
171
172 Show the options for the "serve" command
172 Show the options for the "serve" command
173 $ hg debugcomplete --options serve | sort
173 $ hg debugcomplete --options serve | sort
174 --accesslog
174 --accesslog
175 --address
175 --address
176 --certificate
176 --certificate
177 --cmdserver
177 --cmdserver
178 --color
178 --color
179 --config
179 --config
180 --cwd
180 --cwd
181 --daemon
181 --daemon
182 --daemon-postexec
182 --daemon-postexec
183 --debug
183 --debug
184 --debugger
184 --debugger
185 --encoding
185 --encoding
186 --encodingmode
186 --encodingmode
187 --errorlog
187 --errorlog
188 --help
188 --help
189 --hidden
189 --hidden
190 --ipv6
190 --ipv6
191 --name
191 --name
192 --noninteractive
192 --noninteractive
193 --pager
193 --pager
194 --pid-file
194 --pid-file
195 --port
195 --port
196 --prefix
196 --prefix
197 --print-url
197 --print-url
198 --profile
198 --profile
199 --quiet
199 --quiet
200 --repository
200 --repository
201 --stdio
201 --stdio
202 --style
202 --style
203 --subrepos
203 --subrepos
204 --templates
204 --templates
205 --time
205 --time
206 --traceback
206 --traceback
207 --verbose
207 --verbose
208 --version
208 --version
209 --web-conf
209 --web-conf
210 -6
210 -6
211 -A
211 -A
212 -E
212 -E
213 -R
213 -R
214 -S
214 -S
215 -a
215 -a
216 -d
216 -d
217 -h
217 -h
218 -n
218 -n
219 -p
219 -p
220 -q
220 -q
221 -t
221 -t
222 -v
222 -v
223 -y
223 -y
224
224
225 Show an error if we use --options with an ambiguous abbreviation
225 Show an error if we use --options with an ambiguous abbreviation
226 $ hg debugcomplete --options s
226 $ hg debugcomplete --options s
227 hg: command 's' is ambiguous:
227 hg: command 's' is ambiguous:
228 serve showconfig status summary
228 serve showconfig status summary
229 [255]
229 [255]
230
230
231 Show all commands + options
231 Show all commands + options
232 $ hg debugcommands
232 $ hg debugcommands
233 add: include, exclude, subrepos, dry-run
233 add: include, exclude, subrepos, dry-run
234 addremove: similarity, subrepos, include, exclude, dry-run
234 addremove: similarity, subrepos, include, exclude, dry-run
235 annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, skip, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, include, exclude, template
235 annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, skip, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, include, exclude, template
236 archive: no-decode, prefix, rev, type, subrepos, include, exclude
236 archive: no-decode, prefix, rev, type, subrepos, include, exclude
237 backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
237 backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
238 bisect: reset, good, bad, skip, extend, command, noupdate
238 bisect: reset, good, bad, skip, extend, command, noupdate
239 bookmarks: force, rev, delete, rename, inactive, list, template
239 bookmarks: force, rev, delete, rename, inactive, list, template
240 branch: force, clean, rev
240 branch: force, clean, rev
241 branches: active, closed, rev, template
241 branches: active, closed, rev, template
242 bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
242 bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
243 cat: output, rev, decode, include, exclude, template
243 cat: output, rev, decode, include, exclude, template
244 clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
244 clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
245 commit: addremove, close-branch, amend, secret, edit, interactive, include, exclude, message, logfile, date, user, subrepos
245 commit: addremove, close-branch, amend, secret, edit, interactive, include, exclude, message, logfile, date, user, subrepos
246 config: untrusted, edit, local, global, template
246 config: untrusted, edit, local, global, template
247 copy: after, force, include, exclude, dry-run
247 copy: after, force, include, exclude, dry-run
248 debugancestor:
248 debugancestor:
249 debugapplystreamclonebundle:
249 debugapplystreamclonebundle:
250 debugbuilddag: mergeable-file, overwritten-file, new-file
250 debugbuilddag: mergeable-file, overwritten-file, new-file
251 debugbundle: all, part-type, spec
251 debugbundle: all, part-type, spec
252 debugcapabilities:
252 debugcapabilities:
253 debugcheckstate:
253 debugcheckstate:
254 debugcolor: style
254 debugcolor: style
255 debugcommands:
255 debugcommands:
256 debugcomplete: options
256 debugcomplete: options
257 debugcreatestreamclonebundle:
257 debugcreatestreamclonebundle:
258 debugdag: tags, branches, dots, spaces
258 debugdag: tags, branches, dots, spaces
259 debugdata: changelog, manifest, dir
259 debugdata: changelog, manifest, dir
260 debugdate: extended
260 debugdate: extended
261 debugdeltachain: changelog, manifest, dir, template
261 debugdeltachain: changelog, manifest, dir, template
262 debugdirstate: nodates, dates, datesort
262 debugdirstate: nodates, dates, datesort
263 debugdiscovery: old, nonheads, rev, ssh, remotecmd, insecure
263 debugdiscovery: old, nonheads, rev, ssh, remotecmd, insecure
264 debugdownload: output
264 debugdownload: output
265 debugextensions: template
265 debugextensions: template
266 debugfileset: rev, all-files, show-matcher, show-stage
266 debugfileset: rev, all-files, show-matcher, show-stage
267 debugformat: template
267 debugformat: template
268 debugfsinfo:
268 debugfsinfo:
269 debuggetbundle: head, common, type
269 debuggetbundle: head, common, type
270 debugignore:
270 debugignore:
271 debugindex: changelog, manifest, dir, template
271 debugindex: changelog, manifest, dir, template
272 debugindexdot: changelog, manifest, dir
272 debugindexdot: changelog, manifest, dir
273 debugindexstats:
273 debugindexstats:
274 debuginstall: template
274 debuginstall: template
275 debugknown:
275 debugknown:
276 debuglabelcomplete:
276 debuglabelcomplete:
277 debuglocks: force-lock, force-wlock, set-lock, set-wlock
277 debuglocks: force-lock, force-wlock, set-lock, set-wlock
278 debugmanifestfulltextcache: clear, add
278 debugmanifestfulltextcache: clear, add
279 debugmergestate:
279 debugmergestate:
280 debugnamecomplete:
280 debugnamecomplete:
281 debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
281 debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
282 debugpathcomplete: full, normal, added, removed
282 debugpathcomplete: full, normal, added, removed
283 debugpeer:
283 debugpeer:
284 debugpickmergetool: rev, changedelete, include, exclude, tool
284 debugpickmergetool: rev, changedelete, include, exclude, tool
285 debugpushkey:
285 debugpushkey:
286 debugpvec:
286 debugpvec:
287 debugrebuilddirstate: rev, minimal
287 debugrebuilddirstate: rev, minimal
288 debugrebuildfncache:
288 debugrebuildfncache:
289 debugrename: rev
289 debugrename: rev
290 debugrevlog: changelog, manifest, dir, dump
290 debugrevlog: changelog, manifest, dir, dump
291 debugrevlogindex: changelog, manifest, dir, format
291 debugrevlogindex: changelog, manifest, dir, format
292 debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
292 debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
293 debugserve: sshstdio, logiofd, logiofile
293 debugserve: sshstdio, logiofd, logiofile
294 debugsetparents:
294 debugsetparents:
295 debugssl:
295 debugssl:
296 debugsub: rev
296 debugsub: rev
297 debugsuccessorssets: closest
297 debugsuccessorssets: closest
298 debugtemplate: rev, define
298 debugtemplate: rev, define
299 debuguigetpass: prompt
299 debuguigetpass: prompt
300 debuguiprompt: prompt
300 debuguiprompt: prompt
301 debugupdatecaches:
301 debugupdatecaches:
302 debugupgraderepo: optimize, run
302 debugupgraderepo: optimize, run, backup
303 debugwalk: include, exclude
303 debugwalk: include, exclude
304 debugwhyunstable:
304 debugwhyunstable:
305 debugwireargs: three, four, five, ssh, remotecmd, insecure
305 debugwireargs: three, four, five, ssh, remotecmd, insecure
306 debugwireproto: localssh, peer, noreadstderr, nologhandshake, ssh, remotecmd, insecure
306 debugwireproto: localssh, peer, noreadstderr, nologhandshake, ssh, remotecmd, insecure
307 diff: rev, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos
307 diff: rev, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos
308 export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template
308 export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template
309 files: rev, print0, include, exclude, template, subrepos
309 files: rev, print0, include, exclude, template, subrepos
310 forget: interactive, include, exclude, dry-run
310 forget: interactive, include, exclude, dry-run
311 graft: rev, base, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run
311 graft: rev, base, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run
312 grep: print0, all, diff, text, follow, ignore-case, files-with-matches, line-number, rev, all-files, user, date, template, include, exclude
312 grep: print0, all, diff, text, follow, ignore-case, files-with-matches, line-number, rev, all-files, user, date, template, include, exclude
313 heads: rev, topo, active, closed, style, template
313 heads: rev, topo, active, closed, style, template
314 help: extension, command, keyword, system
314 help: extension, command, keyword, system
315 identify: rev, num, id, branch, tags, bookmarks, ssh, remotecmd, insecure, template
315 identify: rev, num, id, branch, tags, bookmarks, ssh, remotecmd, insecure, template
316 import: strip, base, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity
316 import: strip, base, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity
317 incoming: force, newest-first, bundle, rev, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
317 incoming: force, newest-first, bundle, rev, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
318 init: ssh, remotecmd, insecure
318 init: ssh, remotecmd, insecure
319 locate: rev, print0, fullpath, include, exclude
319 locate: rev, print0, fullpath, include, exclude
320 log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
320 log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
321 manifest: rev, all, template
321 manifest: rev, all, template
322 merge: force, rev, preview, abort, tool
322 merge: force, rev, preview, abort, tool
323 outgoing: force, rev, newest-first, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
323 outgoing: force, rev, newest-first, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
324 parents: rev, style, template
324 parents: rev, style, template
325 paths: template
325 paths: template
326 phase: public, draft, secret, force, rev
326 phase: public, draft, secret, force, rev
327 pull: update, force, rev, bookmark, branch, ssh, remotecmd, insecure
327 pull: update, force, rev, bookmark, branch, ssh, remotecmd, insecure
328 push: force, rev, bookmark, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
328 push: force, rev, bookmark, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
329 recover:
329 recover:
330 remove: after, force, subrepos, include, exclude, dry-run
330 remove: after, force, subrepos, include, exclude, dry-run
331 rename: after, force, include, exclude, dry-run
331 rename: after, force, include, exclude, dry-run
332 resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
332 resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
333 revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
333 revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
334 rollback: dry-run, force
334 rollback: dry-run, force
335 root:
335 root:
336 serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, print-url, subrepos
336 serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, print-url, subrepos
337 status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, terse, copies, print0, rev, change, include, exclude, subrepos, template
337 status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, terse, copies, print0, rev, change, include, exclude, subrepos, template
338 summary: remote
338 summary: remote
339 tag: force, local, rev, remove, edit, message, date, user
339 tag: force, local, rev, remove, edit, message, date, user
340 tags: template
340 tags: template
341 tip: patch, git, style, template
341 tip: patch, git, style, template
342 unbundle: update
342 unbundle: update
343 update: clean, check, merge, date, rev, tool
343 update: clean, check, merge, date, rev, tool
344 verify:
344 verify:
345 version: template
345 version: template
346
346
347 $ hg init a
347 $ hg init a
348 $ cd a
348 $ cd a
349 $ echo fee > fee
349 $ echo fee > fee
350 $ hg ci -q -Amfee
350 $ hg ci -q -Amfee
351 $ hg tag fee
351 $ hg tag fee
352 $ mkdir fie
352 $ mkdir fie
353 $ echo dead > fie/dead
353 $ echo dead > fie/dead
354 $ echo live > fie/live
354 $ echo live > fie/live
355 $ hg bookmark fo
355 $ hg bookmark fo
356 $ hg branch -q fie
356 $ hg branch -q fie
357 $ hg ci -q -Amfie
357 $ hg ci -q -Amfie
358 $ echo fo > fo
358 $ echo fo > fo
359 $ hg branch -qf default
359 $ hg branch -qf default
360 $ hg ci -q -Amfo
360 $ hg ci -q -Amfo
361 $ echo Fum > Fum
361 $ echo Fum > Fum
362 $ hg ci -q -AmFum
362 $ hg ci -q -AmFum
363 $ hg bookmark Fum
363 $ hg bookmark Fum
364
364
365 Test debugpathcomplete
365 Test debugpathcomplete
366
366
367 $ hg debugpathcomplete f
367 $ hg debugpathcomplete f
368 fee
368 fee
369 fie
369 fie
370 fo
370 fo
371 $ hg debugpathcomplete -f f
371 $ hg debugpathcomplete -f f
372 fee
372 fee
373 fie/dead
373 fie/dead
374 fie/live
374 fie/live
375 fo
375 fo
376
376
377 $ hg rm Fum
377 $ hg rm Fum
378 $ hg debugpathcomplete -r F
378 $ hg debugpathcomplete -r F
379 Fum
379 Fum
380
380
381 Test debugnamecomplete
381 Test debugnamecomplete
382
382
383 $ hg debugnamecomplete
383 $ hg debugnamecomplete
384 Fum
384 Fum
385 default
385 default
386 fee
386 fee
387 fie
387 fie
388 fo
388 fo
389 tip
389 tip
390 $ hg debugnamecomplete f
390 $ hg debugnamecomplete f
391 fee
391 fee
392 fie
392 fie
393 fo
393 fo
394
394
395 Test debuglabelcomplete, a deprecated name for debugnamecomplete that is still
395 Test debuglabelcomplete, a deprecated name for debugnamecomplete that is still
396 used for completions in some shells.
396 used for completions in some shells.
397
397
398 $ hg debuglabelcomplete
398 $ hg debuglabelcomplete
399 Fum
399 Fum
400 default
400 default
401 fee
401 fee
402 fie
402 fie
403 fo
403 fo
404 tip
404 tip
405 $ hg debuglabelcomplete f
405 $ hg debuglabelcomplete f
406 fee
406 fee
407 fie
407 fie
408 fo
408 fo
@@ -1,802 +1,843 b''
1 #require no-reposimplestore
1 #require no-reposimplestore
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > share =
5 > share =
6 > EOF
6 > EOF
7
7
8 store and revlogv1 are required in source
8 store and revlogv1 are required in source
9
9
10 $ hg --config format.usestore=false init no-store
10 $ hg --config format.usestore=false init no-store
11 $ hg -R no-store debugupgraderepo
11 $ hg -R no-store debugupgraderepo
12 abort: cannot upgrade repository; requirement missing: store
12 abort: cannot upgrade repository; requirement missing: store
13 [255]
13 [255]
14
14
15 $ hg init no-revlogv1
15 $ hg init no-revlogv1
16 $ cat > no-revlogv1/.hg/requires << EOF
16 $ cat > no-revlogv1/.hg/requires << EOF
17 > dotencode
17 > dotencode
18 > fncache
18 > fncache
19 > generaldelta
19 > generaldelta
20 > store
20 > store
21 > EOF
21 > EOF
22
22
23 $ hg -R no-revlogv1 debugupgraderepo
23 $ hg -R no-revlogv1 debugupgraderepo
24 abort: cannot upgrade repository; requirement missing: revlogv1
24 abort: cannot upgrade repository; requirement missing: revlogv1
25 [255]
25 [255]
26
26
27 Cannot upgrade shared repositories
27 Cannot upgrade shared repositories
28
28
29 $ hg init share-parent
29 $ hg init share-parent
30 $ hg -q share share-parent share-child
30 $ hg -q share share-parent share-child
31
31
32 $ hg -R share-child debugupgraderepo
32 $ hg -R share-child debugupgraderepo
33 abort: cannot upgrade repository; unsupported source requirement: shared
33 abort: cannot upgrade repository; unsupported source requirement: shared
34 [255]
34 [255]
35
35
36 Do not yet support upgrading treemanifest repos
36 Do not yet support upgrading treemanifest repos
37
37
38 $ hg --config experimental.treemanifest=true init treemanifest
38 $ hg --config experimental.treemanifest=true init treemanifest
39 $ hg -R treemanifest debugupgraderepo
39 $ hg -R treemanifest debugupgraderepo
40 abort: cannot upgrade repository; unsupported source requirement: treemanifest
40 abort: cannot upgrade repository; unsupported source requirement: treemanifest
41 [255]
41 [255]
42
42
43 Cannot add treemanifest requirement during upgrade
43 Cannot add treemanifest requirement during upgrade
44
44
45 $ hg init disallowaddedreq
45 $ hg init disallowaddedreq
46 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
46 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
47 abort: cannot upgrade repository; do not support adding requirement: treemanifest
47 abort: cannot upgrade repository; do not support adding requirement: treemanifest
48 [255]
48 [255]
49
49
50 An upgrade of a repository created with recommended settings only suggests optimizations
50 An upgrade of a repository created with recommended settings only suggests optimizations
51
51
52 $ hg init empty
52 $ hg init empty
53 $ cd empty
53 $ cd empty
54 $ hg debugformat
54 $ hg debugformat
55 format-variant repo
55 format-variant repo
56 fncache: yes
56 fncache: yes
57 dotencode: yes
57 dotencode: yes
58 generaldelta: yes
58 generaldelta: yes
59 sparserevlog: yes
59 sparserevlog: yes
60 plain-cl-delta: yes
60 plain-cl-delta: yes
61 compression: zlib
61 compression: zlib
62 $ hg debugformat --verbose
62 $ hg debugformat --verbose
63 format-variant repo config default
63 format-variant repo config default
64 fncache: yes yes yes
64 fncache: yes yes yes
65 dotencode: yes yes yes
65 dotencode: yes yes yes
66 generaldelta: yes yes yes
66 generaldelta: yes yes yes
67 sparserevlog: yes yes yes
67 sparserevlog: yes yes yes
68 plain-cl-delta: yes yes yes
68 plain-cl-delta: yes yes yes
69 compression: zlib zlib zlib
69 compression: zlib zlib zlib
70 $ hg debugformat --verbose --config format.usefncache=no
70 $ hg debugformat --verbose --config format.usefncache=no
71 format-variant repo config default
71 format-variant repo config default
72 fncache: yes no yes
72 fncache: yes no yes
73 dotencode: yes no yes
73 dotencode: yes no yes
74 generaldelta: yes yes yes
74 generaldelta: yes yes yes
75 sparserevlog: yes yes yes
75 sparserevlog: yes yes yes
76 plain-cl-delta: yes yes yes
76 plain-cl-delta: yes yes yes
77 compression: zlib zlib zlib
77 compression: zlib zlib zlib
78 $ hg debugformat --verbose --config format.usefncache=no --color=debug
78 $ hg debugformat --verbose --config format.usefncache=no --color=debug
79 format-variant repo config default
79 format-variant repo config default
80 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
80 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
81 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
81 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
82 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
82 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
83 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
83 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
84 [formatvariant.name.uptodate|plain-cl-delta:][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
84 [formatvariant.name.uptodate|plain-cl-delta:][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
85 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
85 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
86 $ hg debugformat -Tjson
86 $ hg debugformat -Tjson
87 [
87 [
88 {
88 {
89 "config": true,
89 "config": true,
90 "default": true,
90 "default": true,
91 "name": "fncache",
91 "name": "fncache",
92 "repo": true
92 "repo": true
93 },
93 },
94 {
94 {
95 "config": true,
95 "config": true,
96 "default": true,
96 "default": true,
97 "name": "dotencode",
97 "name": "dotencode",
98 "repo": true
98 "repo": true
99 },
99 },
100 {
100 {
101 "config": true,
101 "config": true,
102 "default": true,
102 "default": true,
103 "name": "generaldelta",
103 "name": "generaldelta",
104 "repo": true
104 "repo": true
105 },
105 },
106 {
106 {
107 "config": true,
107 "config": true,
108 "default": true,
108 "default": true,
109 "name": "sparserevlog",
109 "name": "sparserevlog",
110 "repo": true
110 "repo": true
111 },
111 },
112 {
112 {
113 "config": true,
113 "config": true,
114 "default": true,
114 "default": true,
115 "name": "plain-cl-delta",
115 "name": "plain-cl-delta",
116 "repo": true
116 "repo": true
117 },
117 },
118 {
118 {
119 "config": "zlib",
119 "config": "zlib",
120 "default": "zlib",
120 "default": "zlib",
121 "name": "compression",
121 "name": "compression",
122 "repo": "zlib"
122 "repo": "zlib"
123 }
123 }
124 ]
124 ]
125 $ hg debugupgraderepo
125 $ hg debugupgraderepo
126 (no feature deficiencies found in existing repository)
126 (no feature deficiencies found in existing repository)
127 performing an upgrade with "--run" will make the following changes:
127 performing an upgrade with "--run" will make the following changes:
128
128
129 requirements
129 requirements
130 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
130 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
131
131
132 additional optimizations are available by specifying "--optimize <name>":
132 additional optimizations are available by specifying "--optimize <name>":
133
133
134 re-delta-parent
134 re-delta-parent
135 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
135 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
136
136
137 re-delta-multibase
137 re-delta-multibase
138 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
138 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
139
139
140 re-delta-all
140 re-delta-all
141 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
141 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
142
142
143 re-delta-fulladd
143 re-delta-fulladd
144 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
144 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
145
145
146
146
147 --optimize can be used to add optimizations
147 --optimize can be used to add optimizations
148
148
149 $ hg debugupgrade --optimize redeltaparent
149 $ hg debugupgrade --optimize redeltaparent
150 (no feature deficiencies found in existing repository)
150 (no feature deficiencies found in existing repository)
151 performing an upgrade with "--run" will make the following changes:
151 performing an upgrade with "--run" will make the following changes:
152
152
153 requirements
153 requirements
154 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
154 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
155
155
156 re-delta-parent
156 re-delta-parent
157 deltas within internal storage will choose a new base revision if needed
157 deltas within internal storage will choose a new base revision if needed
158
158
159 additional optimizations are available by specifying "--optimize <name>":
159 additional optimizations are available by specifying "--optimize <name>":
160
160
161 re-delta-multibase
161 re-delta-multibase
162 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
162 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
163
163
164 re-delta-all
164 re-delta-all
165 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
165 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
166
166
167 re-delta-fulladd
167 re-delta-fulladd
168 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
168 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
169
169
170
170
171 modern form of the option
171 modern form of the option
172
172
173 $ hg debugupgrade --optimize re-delta-parent
173 $ hg debugupgrade --optimize re-delta-parent
174 (no feature deficiencies found in existing repository)
174 (no feature deficiencies found in existing repository)
175 performing an upgrade with "--run" will make the following changes:
175 performing an upgrade with "--run" will make the following changes:
176
176
177 requirements
177 requirements
178 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
178 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
179
179
180 re-delta-parent
180 re-delta-parent
181 deltas within internal storage will choose a new base revision if needed
181 deltas within internal storage will choose a new base revision if needed
182
182
183 additional optimizations are available by specifying "--optimize <name>":
183 additional optimizations are available by specifying "--optimize <name>":
184
184
185 re-delta-multibase
185 re-delta-multibase
186 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
186 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
187
187
188 re-delta-all
188 re-delta-all
189 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
189 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
190
190
191 re-delta-fulladd
191 re-delta-fulladd
192 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
192 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
193
193
194
194
195 unknown optimization:
195 unknown optimization:
196
196
197 $ hg debugupgrade --optimize foobar
197 $ hg debugupgrade --optimize foobar
198 abort: unknown optimization action requested: foobar
198 abort: unknown optimization action requested: foobar
199 (run without arguments to see valid optimizations)
199 (run without arguments to see valid optimizations)
200 [255]
200 [255]
201
201
202 Various sub-optimal detections work
202 Various sub-optimal detections work
203
203
204 $ cat > .hg/requires << EOF
204 $ cat > .hg/requires << EOF
205 > revlogv1
205 > revlogv1
206 > store
206 > store
207 > EOF
207 > EOF
208
208
209 $ hg debugformat
209 $ hg debugformat
210 format-variant repo
210 format-variant repo
211 fncache: no
211 fncache: no
212 dotencode: no
212 dotencode: no
213 generaldelta: no
213 generaldelta: no
214 sparserevlog: no
214 sparserevlog: no
215 plain-cl-delta: yes
215 plain-cl-delta: yes
216 compression: zlib
216 compression: zlib
217 $ hg debugformat --verbose
217 $ hg debugformat --verbose
218 format-variant repo config default
218 format-variant repo config default
219 fncache: no yes yes
219 fncache: no yes yes
220 dotencode: no yes yes
220 dotencode: no yes yes
221 generaldelta: no yes yes
221 generaldelta: no yes yes
222 sparserevlog: no yes yes
222 sparserevlog: no yes yes
223 plain-cl-delta: yes yes yes
223 plain-cl-delta: yes yes yes
224 compression: zlib zlib zlib
224 compression: zlib zlib zlib
225 $ hg debugformat --verbose --config format.usegeneraldelta=no
225 $ hg debugformat --verbose --config format.usegeneraldelta=no
226 format-variant repo config default
226 format-variant repo config default
227 fncache: no yes yes
227 fncache: no yes yes
228 dotencode: no yes yes
228 dotencode: no yes yes
229 generaldelta: no no yes
229 generaldelta: no no yes
230 sparserevlog: no no yes
230 sparserevlog: no no yes
231 plain-cl-delta: yes yes yes
231 plain-cl-delta: yes yes yes
232 compression: zlib zlib zlib
232 compression: zlib zlib zlib
233 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
233 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
234 format-variant repo config default
234 format-variant repo config default
235 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
235 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
236 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
236 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
237 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
237 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
238 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
238 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
239 [formatvariant.name.uptodate|plain-cl-delta:][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
239 [formatvariant.name.uptodate|plain-cl-delta:][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
240 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
240 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
241 $ hg debugupgraderepo
241 $ hg debugupgraderepo
242 repository lacks features recommended by current config options:
242 repository lacks features recommended by current config options:
243
243
244 fncache
244 fncache
245 long and reserved filenames may not work correctly; repository performance is sub-optimal
245 long and reserved filenames may not work correctly; repository performance is sub-optimal
246
246
247 dotencode
247 dotencode
248 storage of filenames beginning with a period or space may not work correctly
248 storage of filenames beginning with a period or space may not work correctly
249
249
250 generaldelta
250 generaldelta
251 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
251 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
252
252
253 sparserevlog
253 sparserevlog
254 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
254 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
255
255
256
256
257 performing an upgrade with "--run" will make the following changes:
257 performing an upgrade with "--run" will make the following changes:
258
258
259 requirements
259 requirements
260 preserved: revlogv1, store
260 preserved: revlogv1, store
261 added: dotencode, fncache, generaldelta, sparserevlog
261 added: dotencode, fncache, generaldelta, sparserevlog
262
262
263 fncache
263 fncache
264 repository will be more resilient to storing certain paths and performance of certain operations should be improved
264 repository will be more resilient to storing certain paths and performance of certain operations should be improved
265
265
266 dotencode
266 dotencode
267 repository will be better able to store files beginning with a space or period
267 repository will be better able to store files beginning with a space or period
268
268
269 generaldelta
269 generaldelta
270 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
270 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
271
271
272 sparserevlog
272 sparserevlog
273 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
273 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
274
274
275 additional optimizations are available by specifying "--optimize <name>":
275 additional optimizations are available by specifying "--optimize <name>":
276
276
277 re-delta-parent
277 re-delta-parent
278 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
278 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
279
279
280 re-delta-multibase
280 re-delta-multibase
281 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
281 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
282
282
283 re-delta-all
283 re-delta-all
284 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
284 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
285
285
286 re-delta-fulladd
286 re-delta-fulladd
287 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
287 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
288
288
289
289
290 $ hg --config format.dotencode=false debugupgraderepo
290 $ hg --config format.dotencode=false debugupgraderepo
291 repository lacks features recommended by current config options:
291 repository lacks features recommended by current config options:
292
292
293 fncache
293 fncache
294 long and reserved filenames may not work correctly; repository performance is sub-optimal
294 long and reserved filenames may not work correctly; repository performance is sub-optimal
295
295
296 generaldelta
296 generaldelta
297 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
297 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
298
298
299 sparserevlog
299 sparserevlog
300 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
300 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
301
301
302 repository lacks features used by the default config options:
302 repository lacks features used by the default config options:
303
303
304 dotencode
304 dotencode
305 storage of filenames beginning with a period or space may not work correctly
305 storage of filenames beginning with a period or space may not work correctly
306
306
307
307
308 performing an upgrade with "--run" will make the following changes:
308 performing an upgrade with "--run" will make the following changes:
309
309
310 requirements
310 requirements
311 preserved: revlogv1, store
311 preserved: revlogv1, store
312 added: fncache, generaldelta, sparserevlog
312 added: fncache, generaldelta, sparserevlog
313
313
314 fncache
314 fncache
315 repository will be more resilient to storing certain paths and performance of certain operations should be improved
315 repository will be more resilient to storing certain paths and performance of certain operations should be improved
316
316
317 generaldelta
317 generaldelta
318 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
318 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
319
319
320 sparserevlog
320 sparserevlog
321 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
321 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
322
322
323 additional optimizations are available by specifying "--optimize <name>":
323 additional optimizations are available by specifying "--optimize <name>":
324
324
325 re-delta-parent
325 re-delta-parent
326 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
326 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
327
327
328 re-delta-multibase
328 re-delta-multibase
329 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
329 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
330
330
331 re-delta-all
331 re-delta-all
332 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
332 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
333
333
334 re-delta-fulladd
334 re-delta-fulladd
335 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
335 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
336
336
337
337
338 $ cd ..
338 $ cd ..
339
339
340 Upgrading a repository that is already modern essentially no-ops
340 Upgrading a repository that is already modern essentially no-ops
341
341
342 $ hg init modern
342 $ hg init modern
343 $ hg -R modern debugupgraderepo --run
343 $ hg -R modern debugupgraderepo --run
344 upgrade will perform the following actions:
344 upgrade will perform the following actions:
345
345
346 requirements
346 requirements
347 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
347 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
348
348
349 beginning upgrade...
349 beginning upgrade...
350 repository locked and read-only
350 repository locked and read-only
351 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
351 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
352 (it is safe to interrupt this process any time before data migration completes)
352 (it is safe to interrupt this process any time before data migration completes)
353 data fully migrated to temporary repository
353 data fully migrated to temporary repository
354 marking source repository as being upgraded; clients will be unable to read from repository
354 marking source repository as being upgraded; clients will be unable to read from repository
355 starting in-place swap of repository data
355 starting in-place swap of repository data
356 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
356 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
357 replacing store...
357 replacing store...
358 store replacement complete; repository was inconsistent for *s (glob)
358 store replacement complete; repository was inconsistent for *s (glob)
359 finalizing requirements file and making repository readable again
359 finalizing requirements file and making repository readable again
360 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
360 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
361 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
361 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
362 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
362 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
363
363
364 Upgrading a repository to generaldelta works
364 Upgrading a repository to generaldelta works
365
365
366 $ hg --config format.usegeneraldelta=false init upgradegd
366 $ hg --config format.usegeneraldelta=false init upgradegd
367 $ cd upgradegd
367 $ cd upgradegd
368 $ touch f0
368 $ touch f0
369 $ hg -q commit -A -m initial
369 $ hg -q commit -A -m initial
370 $ touch f1
370 $ touch f1
371 $ hg -q commit -A -m 'add f1'
371 $ hg -q commit -A -m 'add f1'
372 $ hg -q up -r 0
372 $ hg -q up -r 0
373 $ touch f2
373 $ touch f2
374 $ hg -q commit -A -m 'add f2'
374 $ hg -q commit -A -m 'add f2'
375
375
376 $ hg debugupgraderepo --run --config format.sparse-revlog=false
376 $ hg debugupgraderepo --run --config format.sparse-revlog=false
377 upgrade will perform the following actions:
377 upgrade will perform the following actions:
378
378
379 requirements
379 requirements
380 preserved: dotencode, fncache, revlogv1, store
380 preserved: dotencode, fncache, revlogv1, store
381 added: generaldelta
381 added: generaldelta
382
382
383 generaldelta
383 generaldelta
384 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
384 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
385
385
386 beginning upgrade...
386 beginning upgrade...
387 repository locked and read-only
387 repository locked and read-only
388 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
388 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
389 (it is safe to interrupt this process any time before data migration completes)
389 (it is safe to interrupt this process any time before data migration completes)
390 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
390 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
391 migrating 917 bytes in store; 401 bytes tracked data
391 migrating 917 bytes in store; 401 bytes tracked data
392 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
392 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
393 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
393 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
394 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
394 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
395 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
395 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
396 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
396 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
397 finished migrating 3 changelog revisions; change in size: 0 bytes
397 finished migrating 3 changelog revisions; change in size: 0 bytes
398 finished migrating 9 total revisions; total change in store size: 0 bytes
398 finished migrating 9 total revisions; total change in store size: 0 bytes
399 copying phaseroots
399 copying phaseroots
400 data fully migrated to temporary repository
400 data fully migrated to temporary repository
401 marking source repository as being upgraded; clients will be unable to read from repository
401 marking source repository as being upgraded; clients will be unable to read from repository
402 starting in-place swap of repository data
402 starting in-place swap of repository data
403 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
403 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
404 replacing store...
404 replacing store...
405 store replacement complete; repository was inconsistent for *s (glob)
405 store replacement complete; repository was inconsistent for *s (glob)
406 finalizing requirements file and making repository readable again
406 finalizing requirements file and making repository readable again
407 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
407 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
408 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
408 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
409 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
409 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
410
410
411 Original requirements backed up
411 Original requirements backed up
412
412
413 $ cat .hg/upgradebackup.*/requires
413 $ cat .hg/upgradebackup.*/requires
414 dotencode
414 dotencode
415 fncache
415 fncache
416 revlogv1
416 revlogv1
417 store
417 store
418
418
419 generaldelta added to original requirements files
419 generaldelta added to original requirements files
420
420
421 $ cat .hg/requires
421 $ cat .hg/requires
422 dotencode
422 dotencode
423 fncache
423 fncache
424 generaldelta
424 generaldelta
425 revlogv1
425 revlogv1
426 store
426 store
427
427
428 store directory has files we expect
428 store directory has files we expect
429
429
430 $ ls .hg/store
430 $ ls .hg/store
431 00changelog.i
431 00changelog.i
432 00manifest.i
432 00manifest.i
433 data
433 data
434 fncache
434 fncache
435 phaseroots
435 phaseroots
436 undo
436 undo
437 undo.backupfiles
437 undo.backupfiles
438 undo.phaseroots
438 undo.phaseroots
439
439
440 manifest should be generaldelta
440 manifest should be generaldelta
441
441
442 $ hg debugrevlog -m | grep flags
442 $ hg debugrevlog -m | grep flags
443 flags : inline, generaldelta
443 flags : inline, generaldelta
444
444
445 verify should be happy
445 verify should be happy
446
446
447 $ hg verify
447 $ hg verify
448 checking changesets
448 checking changesets
449 checking manifests
449 checking manifests
450 crosschecking files in changesets and manifests
450 crosschecking files in changesets and manifests
451 checking files
451 checking files
452 checked 3 changesets with 3 changes to 3 files
452 checked 3 changesets with 3 changes to 3 files
453
453
454 old store should be backed up
454 old store should be backed up
455
455
456 $ ls -d .hg/upgradebackup.*/
457 .hg/upgradebackup.*/ (glob)
456 $ ls .hg/upgradebackup.*/store
458 $ ls .hg/upgradebackup.*/store
457 00changelog.i
459 00changelog.i
458 00manifest.i
460 00manifest.i
459 data
461 data
460 fncache
462 fncache
461 phaseroots
463 phaseroots
462 undo
464 undo
463 undo.backup.fncache
465 undo.backup.fncache
464 undo.backupfiles
466 undo.backupfiles
465 undo.phaseroots
467 undo.phaseroots
466
468
469 unless --no-backup is passed
470
471 $ rm -rf .hg/upgradebackup.*/
472 $ hg debugupgraderepo --run --no-backup
473 upgrade will perform the following actions:
474
475 requirements
476 preserved: dotencode, fncache, generaldelta, revlogv1, store
477 added: sparserevlog
478
479 sparserevlog
480 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
481
482 beginning upgrade...
483 repository locked and read-only
484 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
485 (it is safe to interrupt this process any time before data migration completes)
486 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
487 migrating 917 bytes in store; 401 bytes tracked data
488 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
489 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
490 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
491 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
492 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
493 finished migrating 3 changelog revisions; change in size: 0 bytes
494 finished migrating 9 total revisions; total change in store size: 0 bytes
495 copying phaseroots
496 data fully migrated to temporary repository
497 marking source repository as being upgraded; clients will be unable to read from repository
498 starting in-place swap of repository data
499 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
500 replacing store...
501 store replacement complete; repository was inconsistent for 0.0s
502 finalizing requirements file and making repository readable again
503 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
504 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
505 $ ls -1 .hg/ | grep upgradebackup
506 [1]
467 $ cd ..
507 $ cd ..
468
508
509
469 store files with special filenames aren't encoded during copy
510 store files with special filenames aren't encoded during copy
470
511
471 $ hg init store-filenames
512 $ hg init store-filenames
472 $ cd store-filenames
513 $ cd store-filenames
473 $ touch foo
514 $ touch foo
474 $ hg -q commit -A -m initial
515 $ hg -q commit -A -m initial
475 $ touch .hg/store/.XX_special_filename
516 $ touch .hg/store/.XX_special_filename
476
517
477 $ hg debugupgraderepo --run
518 $ hg debugupgraderepo --run
478 upgrade will perform the following actions:
519 upgrade will perform the following actions:
479
520
480 requirements
521 requirements
481 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
522 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
482
523
483 beginning upgrade...
524 beginning upgrade...
484 repository locked and read-only
525 repository locked and read-only
485 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
526 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
486 (it is safe to interrupt this process any time before data migration completes)
527 (it is safe to interrupt this process any time before data migration completes)
487 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
528 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
488 migrating 301 bytes in store; 107 bytes tracked data
529 migrating 301 bytes in store; 107 bytes tracked data
489 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
530 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
490 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
531 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
491 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
532 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
492 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
533 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
493 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
534 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
494 finished migrating 1 changelog revisions; change in size: 0 bytes
535 finished migrating 1 changelog revisions; change in size: 0 bytes
495 finished migrating 3 total revisions; total change in store size: 0 bytes
536 finished migrating 3 total revisions; total change in store size: 0 bytes
496 copying .XX_special_filename
537 copying .XX_special_filename
497 copying phaseroots
538 copying phaseroots
498 data fully migrated to temporary repository
539 data fully migrated to temporary repository
499 marking source repository as being upgraded; clients will be unable to read from repository
540 marking source repository as being upgraded; clients will be unable to read from repository
500 starting in-place swap of repository data
541 starting in-place swap of repository data
501 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
542 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
502 replacing store...
543 replacing store...
503 store replacement complete; repository was inconsistent for *s (glob)
544 store replacement complete; repository was inconsistent for *s (glob)
504 finalizing requirements file and making repository readable again
545 finalizing requirements file and making repository readable again
505 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
546 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
506 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
547 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
507 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
548 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
508 $ hg debugupgraderepo --run --optimize redeltafulladd
549 $ hg debugupgraderepo --run --optimize redeltafulladd
509 upgrade will perform the following actions:
550 upgrade will perform the following actions:
510
551
511 requirements
552 requirements
512 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
553 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
513
554
514 re-delta-fulladd
555 re-delta-fulladd
515 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
556 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
516
557
517 beginning upgrade...
558 beginning upgrade...
518 repository locked and read-only
559 repository locked and read-only
519 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
560 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
520 (it is safe to interrupt this process any time before data migration completes)
561 (it is safe to interrupt this process any time before data migration completes)
521 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
562 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
522 migrating 301 bytes in store; 107 bytes tracked data
563 migrating 301 bytes in store; 107 bytes tracked data
523 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
564 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
524 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
565 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
525 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
566 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
526 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
567 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
527 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
568 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
528 finished migrating 1 changelog revisions; change in size: 0 bytes
569 finished migrating 1 changelog revisions; change in size: 0 bytes
529 finished migrating 3 total revisions; total change in store size: 0 bytes
570 finished migrating 3 total revisions; total change in store size: 0 bytes
530 copying .XX_special_filename
571 copying .XX_special_filename
531 copying phaseroots
572 copying phaseroots
532 data fully migrated to temporary repository
573 data fully migrated to temporary repository
533 marking source repository as being upgraded; clients will be unable to read from repository
574 marking source repository as being upgraded; clients will be unable to read from repository
534 starting in-place swap of repository data
575 starting in-place swap of repository data
535 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
576 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
536 replacing store...
577 replacing store...
537 store replacement complete; repository was inconsistent for *s (glob)
578 store replacement complete; repository was inconsistent for *s (glob)
538 finalizing requirements file and making repository readable again
579 finalizing requirements file and making repository readable again
539 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
580 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
540 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
581 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
541 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
582 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
542
583
543 fncache is valid after upgrade
584 fncache is valid after upgrade
544
585
545 $ hg debugrebuildfncache
586 $ hg debugrebuildfncache
546 fncache already up to date
587 fncache already up to date
547
588
548 $ cd ..
589 $ cd ..
549
590
550 Check upgrading a large file repository
591 Check upgrading a large file repository
551 ---------------------------------------
592 ---------------------------------------
552
593
553 $ hg init largefilesrepo
594 $ hg init largefilesrepo
554 $ cat << EOF >> largefilesrepo/.hg/hgrc
595 $ cat << EOF >> largefilesrepo/.hg/hgrc
555 > [extensions]
596 > [extensions]
556 > largefiles =
597 > largefiles =
557 > EOF
598 > EOF
558
599
559 $ cd largefilesrepo
600 $ cd largefilesrepo
560 $ touch foo
601 $ touch foo
561 $ hg add --large foo
602 $ hg add --large foo
562 $ hg -q commit -m initial
603 $ hg -q commit -m initial
563 $ cat .hg/requires
604 $ cat .hg/requires
564 dotencode
605 dotencode
565 fncache
606 fncache
566 generaldelta
607 generaldelta
567 largefiles
608 largefiles
568 revlogv1
609 revlogv1
569 sparserevlog
610 sparserevlog
570 store
611 store
571
612
572 $ hg debugupgraderepo --run
613 $ hg debugupgraderepo --run
573 upgrade will perform the following actions:
614 upgrade will perform the following actions:
574
615
575 requirements
616 requirements
576 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
617 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
577
618
578 beginning upgrade...
619 beginning upgrade...
579 repository locked and read-only
620 repository locked and read-only
580 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
621 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
581 (it is safe to interrupt this process any time before data migration completes)
622 (it is safe to interrupt this process any time before data migration completes)
582 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
623 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
583 migrating 355 bytes in store; 160 bytes tracked data
624 migrating 355 bytes in store; 160 bytes tracked data
584 migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
625 migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
585 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
626 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
586 migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
627 migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
587 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
628 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
588 migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
629 migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
589 finished migrating 1 changelog revisions; change in size: 0 bytes
630 finished migrating 1 changelog revisions; change in size: 0 bytes
590 finished migrating 3 total revisions; total change in store size: 0 bytes
631 finished migrating 3 total revisions; total change in store size: 0 bytes
591 copying phaseroots
632 copying phaseroots
592 data fully migrated to temporary repository
633 data fully migrated to temporary repository
593 marking source repository as being upgraded; clients will be unable to read from repository
634 marking source repository as being upgraded; clients will be unable to read from repository
594 starting in-place swap of repository data
635 starting in-place swap of repository data
595 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
636 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
596 replacing store...
637 replacing store...
597 store replacement complete; repository was inconsistent for *s (glob)
638 store replacement complete; repository was inconsistent for *s (glob)
598 finalizing requirements file and making repository readable again
639 finalizing requirements file and making repository readable again
599 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
640 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
600 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
641 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
601 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
642 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
602 $ cat .hg/requires
643 $ cat .hg/requires
603 dotencode
644 dotencode
604 fncache
645 fncache
605 generaldelta
646 generaldelta
606 largefiles
647 largefiles
607 revlogv1
648 revlogv1
608 sparserevlog
649 sparserevlog
609 store
650 store
610
651
611 $ cat << EOF >> .hg/hgrc
652 $ cat << EOF >> .hg/hgrc
612 > [extensions]
653 > [extensions]
613 > lfs =
654 > lfs =
614 > [lfs]
655 > [lfs]
615 > threshold = 10
656 > threshold = 10
616 > EOF
657 > EOF
617 $ echo '123456789012345' > lfs.bin
658 $ echo '123456789012345' > lfs.bin
618 $ hg ci -Am 'lfs.bin'
659 $ hg ci -Am 'lfs.bin'
619 adding lfs.bin
660 adding lfs.bin
620 $ grep lfs .hg/requires
661 $ grep lfs .hg/requires
621 lfs
662 lfs
622 $ find .hg/store/lfs -type f
663 $ find .hg/store/lfs -type f
623 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
664 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
624
665
625 $ hg debugupgraderepo --run
666 $ hg debugupgraderepo --run
626 upgrade will perform the following actions:
667 upgrade will perform the following actions:
627
668
628 requirements
669 requirements
629 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
670 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
630
671
631 beginning upgrade...
672 beginning upgrade...
632 repository locked and read-only
673 repository locked and read-only
633 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
674 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
634 (it is safe to interrupt this process any time before data migration completes)
675 (it is safe to interrupt this process any time before data migration completes)
635 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
676 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
636 migrating 801 bytes in store; 467 bytes tracked data
677 migrating 801 bytes in store; 467 bytes tracked data
637 migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
678 migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
638 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
679 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
639 migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
680 migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
640 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
681 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
641 migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
682 migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
642 finished migrating 2 changelog revisions; change in size: 0 bytes
683 finished migrating 2 changelog revisions; change in size: 0 bytes
643 finished migrating 6 total revisions; total change in store size: 0 bytes
684 finished migrating 6 total revisions; total change in store size: 0 bytes
644 copying phaseroots
685 copying phaseroots
645 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
686 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
646 data fully migrated to temporary repository
687 data fully migrated to temporary repository
647 marking source repository as being upgraded; clients will be unable to read from repository
688 marking source repository as being upgraded; clients will be unable to read from repository
648 starting in-place swap of repository data
689 starting in-place swap of repository data
649 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
690 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
650 replacing store...
691 replacing store...
651 store replacement complete; repository was inconsistent for *s (glob)
692 store replacement complete; repository was inconsistent for *s (glob)
652 finalizing requirements file and making repository readable again
693 finalizing requirements file and making repository readable again
653 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
694 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
654 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
695 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
655 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
696 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
656
697
657 $ grep lfs .hg/requires
698 $ grep lfs .hg/requires
658 lfs
699 lfs
659 $ find .hg/store/lfs -type f
700 $ find .hg/store/lfs -type f
660 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
701 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
661 $ hg verify
702 $ hg verify
662 checking changesets
703 checking changesets
663 checking manifests
704 checking manifests
664 crosschecking files in changesets and manifests
705 crosschecking files in changesets and manifests
665 checking files
706 checking files
666 checked 2 changesets with 2 changes to 2 files
707 checked 2 changesets with 2 changes to 2 files
667 $ hg debugdata lfs.bin 0
708 $ hg debugdata lfs.bin 0
668 version https://git-lfs.github.com/spec/v1
709 version https://git-lfs.github.com/spec/v1
669 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
710 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
670 size 16
711 size 16
671 x-is-binary 0
712 x-is-binary 0
672
713
673 $ cd ..
714 $ cd ..
674
715
675 repository config is taken in account
716 repository config is taken in account
676 -------------------------------------
717 -------------------------------------
677
718
678 $ cat << EOF >> $HGRCPATH
719 $ cat << EOF >> $HGRCPATH
679 > [format]
720 > [format]
680 > maxchainlen = 1
721 > maxchainlen = 1
681 > EOF
722 > EOF
682
723
683 $ hg init localconfig
724 $ hg init localconfig
684 $ cd localconfig
725 $ cd localconfig
685 $ cat << EOF > file
726 $ cat << EOF > file
686 > some content
727 > some content
687 > with some length
728 > with some length
688 > to make sure we get a delta
729 > to make sure we get a delta
689 > after changes
730 > after changes
690 > very long
731 > very long
691 > very long
732 > very long
692 > very long
733 > very long
693 > very long
734 > very long
694 > very long
735 > very long
695 > very long
736 > very long
696 > very long
737 > very long
697 > very long
738 > very long
698 > very long
739 > very long
699 > very long
740 > very long
700 > very long
741 > very long
701 > EOF
742 > EOF
702 $ hg -q commit -A -m A
743 $ hg -q commit -A -m A
703 $ echo "new line" >> file
744 $ echo "new line" >> file
704 $ hg -q commit -m B
745 $ hg -q commit -m B
705 $ echo "new line" >> file
746 $ echo "new line" >> file
706 $ hg -q commit -m C
747 $ hg -q commit -m C
707
748
708 $ cat << EOF >> .hg/hgrc
749 $ cat << EOF >> .hg/hgrc
709 > [format]
750 > [format]
710 > maxchainlen = 9001
751 > maxchainlen = 9001
711 > EOF
752 > EOF
712 $ hg config format
753 $ hg config format
713 format.maxchainlen=9001
754 format.maxchainlen=9001
714 $ hg debugdeltachain file
755 $ hg debugdeltachain file
715 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
756 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
716 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
757 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
717 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
758 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
718 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
759 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
719
760
720 $ hg debugupgraderepo --run --optimize redeltaall
761 $ hg debugupgraderepo --run --optimize redeltaall
721 upgrade will perform the following actions:
762 upgrade will perform the following actions:
722
763
723 requirements
764 requirements
724 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
765 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
725
766
726 re-delta-all
767 re-delta-all
727 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
768 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
728
769
729 beginning upgrade...
770 beginning upgrade...
730 repository locked and read-only
771 repository locked and read-only
731 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
772 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
732 (it is safe to interrupt this process any time before data migration completes)
773 (it is safe to interrupt this process any time before data migration completes)
733 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
774 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
734 migrating 1019 bytes in store; 882 bytes tracked data
775 migrating 1019 bytes in store; 882 bytes tracked data
735 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
776 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
736 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
777 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
737 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
778 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
738 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
779 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
739 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
780 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
740 finished migrating 3 changelog revisions; change in size: 0 bytes
781 finished migrating 3 changelog revisions; change in size: 0 bytes
741 finished migrating 9 total revisions; total change in store size: -9 bytes
782 finished migrating 9 total revisions; total change in store size: -9 bytes
742 copying phaseroots
783 copying phaseroots
743 data fully migrated to temporary repository
784 data fully migrated to temporary repository
744 marking source repository as being upgraded; clients will be unable to read from repository
785 marking source repository as being upgraded; clients will be unable to read from repository
745 starting in-place swap of repository data
786 starting in-place swap of repository data
746 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
787 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
747 replacing store...
788 replacing store...
748 store replacement complete; repository was inconsistent for *s (glob)
789 store replacement complete; repository was inconsistent for *s (glob)
749 finalizing requirements file and making repository readable again
790 finalizing requirements file and making repository readable again
750 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
791 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
751 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
792 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
752 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
793 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
753 $ hg debugdeltachain file
794 $ hg debugdeltachain file
754 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
795 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
755 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
796 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
756 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
797 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
757 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
798 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
758 $ cd ..
799 $ cd ..
759
800
760 $ cat << EOF >> $HGRCPATH
801 $ cat << EOF >> $HGRCPATH
761 > [format]
802 > [format]
762 > maxchainlen = 9001
803 > maxchainlen = 9001
763 > EOF
804 > EOF
764
805
765 Check upgrading a sparse-revlog repository
806 Check upgrading a sparse-revlog repository
766 ---------------------------------------
807 ---------------------------------------
767
808
768 $ hg init sparserevlogrepo --config format.sparse-revlog=no
809 $ hg init sparserevlogrepo --config format.sparse-revlog=no
769 $ cd sparserevlogrepo
810 $ cd sparserevlogrepo
770 $ touch foo
811 $ touch foo
771 $ hg add foo
812 $ hg add foo
772 $ hg -q commit -m "foo"
813 $ hg -q commit -m "foo"
773 $ cat .hg/requires
814 $ cat .hg/requires
774 dotencode
815 dotencode
775 fncache
816 fncache
776 generaldelta
817 generaldelta
777 revlogv1
818 revlogv1
778 store
819 store
779
820
780 Check that we can add the sparse-revlog format requirement
821 Check that we can add the sparse-revlog format requirement
781 $ hg --config format.sparse-revlog=yes debugupgraderepo --run >/dev/null
822 $ hg --config format.sparse-revlog=yes debugupgraderepo --run >/dev/null
782 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
823 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
783 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
824 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
784 $ cat .hg/requires
825 $ cat .hg/requires
785 dotencode
826 dotencode
786 fncache
827 fncache
787 generaldelta
828 generaldelta
788 revlogv1
829 revlogv1
789 sparserevlog
830 sparserevlog
790 store
831 store
791
832
792 Check that we can remove the sparse-revlog format requirement
833 Check that we can remove the sparse-revlog format requirement
793 $ hg --config format.sparse-revlog=no debugupgraderepo --run >/dev/null
834 $ hg --config format.sparse-revlog=no debugupgraderepo --run >/dev/null
794 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
835 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
795 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
836 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
796 $ cat .hg/requires
837 $ cat .hg/requires
797 dotencode
838 dotencode
798 fncache
839 fncache
799 generaldelta
840 generaldelta
800 revlogv1
841 revlogv1
801 store
842 store
802 $ cd ..
843 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now