##// END OF EJS Templates
upgrade: add an argument to control changelog upgrade...
marmoute -
r43099:908ff446 default
parent child Browse files
Show More
@@ -1,3496 +1,3499
1 # debugcommands.py - command processing for debug* commands
1 # debugcommands.py - command processing for debug* commands
2 #
2 #
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import codecs
10 import codecs
11 import collections
11 import collections
12 import difflib
12 import difflib
13 import errno
13 import errno
14 import operator
14 import operator
15 import os
15 import os
16 import random
16 import random
17 import re
17 import re
18 import socket
18 import socket
19 import ssl
19 import ssl
20 import stat
20 import stat
21 import string
21 import string
22 import subprocess
22 import subprocess
23 import sys
23 import sys
24 import time
24 import time
25
25
26 from .i18n import _
26 from .i18n import _
27 from .node import (
27 from .node import (
28 bin,
28 bin,
29 hex,
29 hex,
30 nullhex,
30 nullhex,
31 nullid,
31 nullid,
32 nullrev,
32 nullrev,
33 short,
33 short,
34 )
34 )
35 from . import (
35 from . import (
36 bundle2,
36 bundle2,
37 changegroup,
37 changegroup,
38 cmdutil,
38 cmdutil,
39 color,
39 color,
40 context,
40 context,
41 copies,
41 copies,
42 dagparser,
42 dagparser,
43 encoding,
43 encoding,
44 error,
44 error,
45 exchange,
45 exchange,
46 extensions,
46 extensions,
47 filemerge,
47 filemerge,
48 filesetlang,
48 filesetlang,
49 formatter,
49 formatter,
50 hg,
50 hg,
51 httppeer,
51 httppeer,
52 localrepo,
52 localrepo,
53 lock as lockmod,
53 lock as lockmod,
54 logcmdutil,
54 logcmdutil,
55 merge as mergemod,
55 merge as mergemod,
56 obsolete,
56 obsolete,
57 obsutil,
57 obsutil,
58 phases,
58 phases,
59 policy,
59 policy,
60 pvec,
60 pvec,
61 pycompat,
61 pycompat,
62 registrar,
62 registrar,
63 repair,
63 repair,
64 revlog,
64 revlog,
65 revset,
65 revset,
66 revsetlang,
66 revsetlang,
67 scmutil,
67 scmutil,
68 setdiscovery,
68 setdiscovery,
69 simplemerge,
69 simplemerge,
70 sshpeer,
70 sshpeer,
71 sslutil,
71 sslutil,
72 streamclone,
72 streamclone,
73 templater,
73 templater,
74 treediscovery,
74 treediscovery,
75 upgrade,
75 upgrade,
76 url as urlmod,
76 url as urlmod,
77 util,
77 util,
78 vfs as vfsmod,
78 vfs as vfsmod,
79 wireprotoframing,
79 wireprotoframing,
80 wireprotoserver,
80 wireprotoserver,
81 wireprotov2peer,
81 wireprotov2peer,
82 )
82 )
83 from .utils import (
83 from .utils import (
84 cborutil,
84 cborutil,
85 compression,
85 compression,
86 dateutil,
86 dateutil,
87 procutil,
87 procutil,
88 stringutil,
88 stringutil,
89 )
89 )
90
90
91 from .revlogutils import (
91 from .revlogutils import (
92 deltas as deltautil
92 deltas as deltautil
93 )
93 )
94
94
# Module-level aliases: shorthand for the lock-release helper, and the
# decorator used to register each debug* command into this module's table.
release = lockmod.release

command = registrar.command()
98
98
@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index

    With three arguments, the first is a revlog index file opened from
    the current working directory; with two, the current repository's
    changelog is used. Writes ``rev:hex`` of the common ancestor.
    """
    if len(args) == 3:
        index, rev1, rev2 = args
        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        # the two-argument form needs a repository for its changelog
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (r.rev(a), hex(a)))
117
117
@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    # openpath handles local paths and URLs alike
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.apply(repo)
124
124
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # determine number of revs in DAG (first parse pass, counting only)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = ['%d' % i
                              for i in pycompat.xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []
    progress = ui.makeprogress(_('building'), unit=_('revisions'),
                               total=total)
    with progress, repo.wlock(), repo.lock(), repo.transaction("builddag"):
        at = -1
        atbranch = 'default'
        nodeids = []
        id = 0
        progress.update(id)
        # second parse pass: actually create the commits
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    # stamp this rev's line so later merges have conflicts
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = "of"
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id

                if new_file:
                    fn = "nf%i" % id
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        # carry forward the second parent's nf* files
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(repo, cx, path,
                                                  filecontent[path])
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                ui.note(('branch %s\n' % data))
                atbranch = data
            progress.update(id)

        if tags:
            repo.vfs.write("localtags", "".join(tags))
272
272
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """list the contents of a changegroup ``gen``

    With ``all`` set, every delta of every chunk group (changelog,
    manifest, filelogs) is printed; otherwise only changelog node hashes
    are shown. ``indent`` prefixes each line (used when nested in a
    bundle2 listing).
    """
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            ui.write("\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write("%s%s %s %s %s %s %d\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))

        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        # an empty dict marks the end of the filelog stream
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        chunkdata = gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write("%s%s\n" % (indent_string, hex(node)))
301
301
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = ' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # report the unknown format instead of aborting the whole listing
        msg = "%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = "%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        fm = ui.formatter('debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()
324
324
def _debugphaseheads(ui, data, indent=0):
    """display the phase heads contained in 'data'

    ``data`` is a binary phase-heads blob as found in a bundle2
    'phase-heads' part; one ``hex phasename`` line is printed per head.
    """
    indent_string = ' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write(indent_string)
            ui.write('%s %s\n' % (hex(head), phases.phasenames[phase]))
333
333
def _quasirepr(thing):
    """return a bytes repr of ``thing`` with deterministic dict ordering

    Mapping types are rendered with keys sorted so output is stable
    across runs and Python versions; anything else falls back to repr().
    """
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        return '{%s}' % (
            b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing)))
    return pycompat.bytestr(repr(thing))
339
339
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
    # optional filter: only show the part types the user asked for
    parttypes = opts.get(r'part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = '%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        if part.type == 'changegroup':
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == 'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == 'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)
362
362
@command('debugbundle',
    [('a', 'all', None, _('show all details')),
    ('', 'part-type', [], _('show only the named part type')),
    ('', 'spec', None, _('print the bundlespec of the bundle'))],
    _('FILE'),
    norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            # only the bundlespec was requested; skip reading the payload
            spec = exchange.getbundlespec(ui, f)
            ui.write('%s\n' % spec)
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
381
381
@command('debugcapabilities',
    [], _('PATH'),
    norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    caps = peer.capabilities()
    ui.write(('Main capabilities:\n'))
    for c in sorted(caps):
        ui.write((' %s\n') % c)
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.write(('Bundle2 capabilities:\n'))
        for key, values in sorted(b2caps.iteritems()):
            ui.write((' %s\n') % key)
            for v in values:
                ui.write(('  %s\n') % v)
400
400
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # NOTE: do not name this local 'error' -- that would shadow the
        # 'error' module and make the following error.Abort lookup fail
        # with AttributeError on a bytes object.
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
428
428
@command('debugcolor',
    [('', 'style', None, _('show all configured styles'))],
    'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.write(('color mode: %s\n') % stringutil.pprint(ui._colormode))
    if opts.get(r'style'):
        return _debugdisplaystyle(ui)
    else:
        return _debugdisplaycolor(ui)
439
439
def _debugdisplaycolor(ui):
    """print every color label known to ``ui``, rendered in that color"""
    # work on a copy so the caller's style table is not clobbered
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for k, v in ui.configitems('color'):
            if k.startswith('color.'):
                ui._styles[k] = k[6:]
            elif k.startswith('terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_('available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    items = sorted(ui._styles.items(),
                   key=lambda i: ('_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(('%s\n') % colorname, label=label)
457
457
def _debugdisplaystyle(ui):
    """print each configured style label with its effects, colorized"""
    ui.write(_('available style:\n'))
    if not ui._styles:
        return
    # align the effect lists in one column
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            # 50
            ui.write(': ')
            ui.write(' ' * (max(0, width - len(label))))
            ui.write(', '.join(ui.label(e, e) for e in effects.split()))
        ui.write('\n')
471
471
@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
489
489
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
    ('b', 'branches', None, _('annotate with branch names')),
    ('', 'dots', None, _('use dots for runs')),
    ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get(r'spaces')
    dots = opts.get(r'dots')
    if file_:
        # explicit index file: emit that revlog's DAG
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # no file given: emit the repository changelog's DAG
        cl = repo.changelog
        tags = opts.get(r'tags')
        branches = opts.get(r'branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
    ui.write("\n")
552
552
@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
        # -c/-m/--dir select a well-known revlog, so the first positional
        # argument is actually the revision, not a file
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    r = cmdutil.openstorage(repo, 'debugdata', file_, opts)
    try:
        ui.write(r.rawdata(r.lookup(rev)))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)
568
568
@command('debugdate',
         [('e', 'extended', None, _('try extended date formats'))],
         _('[-e] DATE [RANGE]'),
         norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts[r"extended"]:
        # also try the less common formats (e.g. "12/6/6")
        d = dateutil.parsedate(date, util.extendeddateformats)
    else:
        d = dateutil.parsedate(date)
    ui.write(("internal: %d %d\n") % d)
    ui.write(("standard: %s\n") % dateutil.datestr(d))
    if range:
        # optionally check whether the parsed date matches a date range
        m = dateutil.matchdate(range)
        ui.write(("match: %s\n") % m(d[0]))
584
584
@command('debugdeltachain',
         cmdutil.debugrevlogopts + cmdutil.formatteropts,
         _('-c|-m|FILE'),
         optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``:       revision number
    :``chainid``:   delta chain identifier (numbered by unique base)
    :``chainlen``:  delta chain length to this revision
    :``prevrev``:   previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``:  compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``:   linear distance from base revision in delta chain to end
                    of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``:     total size of data read from the disk for a revision
                       (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``:  density of useful bytes in the data read from the disk
    :``srchunks``:  in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        # return (compsize, uncompsize, deltatype, chain, chainsize) for rev
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # with generaldelta the delta base can be any revision; classify
            # it relative to the parents and the previous revision
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            # without generaldelta, deltas are always against the previous
            # revision unless this revision is a full snapshot
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    fm.plain('    rev  chain# chainlen     prev   delta       '
             'size    rawsize  chainsize     ratio   lindist extradist '
             'extraratio')
    if withsparseread:
        fm.plain('   readsize largestblk rddensity srchunks')
    fm.plain('\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        # number chains sequentially by unique base revision
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            # simulate the sparse read and measure how much data each slice
            # of the chain would pull off disk
            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write('readsize largestblock readdensity srchunks',
                     ' %10d %10d %9.5f %8d',
                     readsize, largestblock, readdensity, srchunks,
                     readsize=readsize, largestblock=largestblock,
                     readdensity=readdensity, srchunks=srchunks)

        fm.plain('\n')

    fm.end()
736
736
@command('debugdirstate|debugstate',
         [('', 'nodates', None, _('do not display the saved mtime (DEPRECATED)')),
          ('', 'dates', True, _('display the saved mtime')),
          ('', 'datesort', None, _('sort by saved mtime'))],
         _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = not opts[r'dates']
    if opts.get(r'nodates') is not None:
        # --nodates is deprecated but still honored; it overrides --dates
        nodates = True
    datesort = opts.get(r'datesort')

    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        # pad the literals to the width of the strftime output below so
        # columns stay aligned
        if ent[3] == -1:
            timestr = 'unset               '
        elif nodates:
            timestr = 'set                 '
        else:
            timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
            timestr = encoding.strtolocal(timestr)
        if ent[1] & 0o20000:
            # symlink bit set in the recorded mode
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
770
770
@command('debugdiscovery',
         [('', 'old', None, _('use old-style discovery')),
          ('', 'nonheads', None,
           _('use old-style discovery with non-heads included')),
          ('', 'rev', [], 'restrict discovery to this set of revs'),
          ('', 'seed', '12323', 'specify the random seed use for discovery'),
         ] + cmdutil.remoteopts,
         _('[--rev REV] [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(int(opts['seed']))

    if opts.get('old'):
        # legacy tree-walking discovery protocol
        def doit(pushedrevs, remoteheads, remote=remote):
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))

            clnode = repo.changelog.node
            common = repo.revs('heads(::%ln)', common)
            common = {clnode(r) for r in common}
            return common, hds
    else:
        # modern set-based discovery protocol
        def doit(pushedrevs, remoteheads, remote=remote):
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote,
                                                            ancestorsof=nodes)
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts['rev']
    with util.timedcm('debug-discovery') as t:
        common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    common = set(common)
    rheads = set(hds)
    lheads = set(repo.heads())

    data = {}
    data['elapsed'] = t.elapsed
    data['nb-common'] = len(common)
    data['nb-common-local'] = len(common & lheads)
    data['nb-common-remote'] = len(common & rheads)
    data['nb-common-both'] = len(common & rheads & lheads)
    data['nb-local'] = len(lheads)
    data['nb-local-missing'] = data['nb-local'] - data['nb-common-local']
    data['nb-remote'] = len(rheads)
    data['nb-remote-unknown'] = data['nb-remote'] - data['nb-common-remote']
    data['nb-revs'] = len(repo.revs('all()'))
    data['nb-revs-common'] = len(repo.revs('::%ln', common))
    data['nb-revs-missing'] = data['nb-revs'] - data['nb-revs-common']

    # display discovery summary
    ui.write(("elapsed time:  %(elapsed)f seconds\n") % data)
    ui.write(("heads summary:\n"))
    ui.write(("  total common heads:  %(nb-common)9d\n") % data)
    ui.write(("    also local heads:  %(nb-common-local)9d\n") % data)
    ui.write(("    also remote heads: %(nb-common-remote)9d\n") % data)
    ui.write(("    both:              %(nb-common-both)9d\n") % data)
    ui.write(("  local heads:         %(nb-local)9d\n") % data)
    ui.write(("    common:            %(nb-common-local)9d\n") % data)
    ui.write(("    missing:           %(nb-local-missing)9d\n") % data)
    ui.write(("  remote heads:        %(nb-remote)9d\n") % data)
    ui.write(("    common:            %(nb-common-remote)9d\n") % data)
    ui.write(("    unknown:           %(nb-remote-unknown)9d\n") % data)
    ui.write(("local changesets:      %(nb-revs)9d\n") % data)
    ui.write(("  common:              %(nb-revs-common)9d\n") % data)
    ui.write(("  missing:             %(nb-revs-missing)9d\n") % data)

    if ui.verbose:
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
861
861
# copy buffer size used when streaming a downloaded resource (4 KiB)
_chunksize = 4 << 10

@command('debugdownload',
         [
             ('o', 'output', '', _('path')),
         ],
         optionalrepo=True)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config
    """
    fh = urlmod.open(ui, url, output)

    # default to writing on the ui; -o redirects into a buffered file
    dest = ui
    if output:
        dest = open(output, "wb", _chunksize)
    try:
        # stream in fixed-size chunks rather than slurping the whole body
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        # only close what we opened ourselves; never close the ui
        if output:
            dest.close()
885
885
@command('debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            # annotate the name with compatibility status in normal mode
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        fm.condwrite(ui.verbose and extsource, 'source',
                     _('  location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_('  bundled: %s\n') % ['no', 'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _('  tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _('  bug reporting: %s\n'), extbuglink or "")

    fm.end()
931
931
@command('debugfileset',
         [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')),
          ('', 'all-files', False,
           _('test files from all revisions and working directory')),
          ('s', 'show-matcher', None,
           _('print internal representation of matcher')),
          ('p', 'show-stage', [],
           _('print parsed tree at the given stage'), _('NAME'))],
         _('[-r REV] [--all-files] [OPTION]... FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset
    fileset.symbols # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    # compilation pipeline; each stage transforms the tree from the previous
    stages = [
        ('parsed', pycompat.identity),
        ('analyzed', filesetlang.analyze),
        ('optimized', filesetlang.optimize),
    ]
    stagenames = set(n for n, f in stages)

    showalways = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(filesetlang.prettyformat(tree), "\n")

    # collect the candidate file names the matcher will be tested against
    files = set()
    if opts['all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts['all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(repo.dirstate.walk(scmutil.matchall(repo),
                                        subrepos=list(wctx.substate),
                                        unknown=True, ignored=True))
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(expr)
    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write("%s\n" % f)
998
998
@command('debugformat',
         [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    # the widest variant name determines the label column width
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len('format-variant'), maxvariantlength)

    def makeformatname(name):
        # pad after the colon so value columns line up for every variant
        return '%s:' + (' ' * (maxvariantlength - len(name)))

    fm = ui.formatter('debugformat', opts)
    if fm.isplain():
        def formatvalue(value):
            # strings are printed as-is; other (boolean-ish) values
            # become yes/no for the plain-text table
            if util.safehasattr(value, 'startswith'):
                return value
            if value:
                return 'yes'
            else:
                return 'no'
    else:
        # structured output (templates, json, ...) keeps raw values
        formatvalue = pycompat.identity

    # header row; config/default columns only appear with --verbose
    fm.plain('format-variant')
    fm.plain(' ' * (maxvariantlength - len('format-variant')))
    fm.plain(' repo')
    if ui.verbose:
        fm.plain(' config default')
    fm.plain('\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # choose labels so the UI can color-highlight disagreements
        # between the repo's on-disk format, the config, and the default
        if repovalue != configvalue:
            namelabel = 'formatvariant.name.mismatchconfig'
            repolabel = 'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = 'formatvariant.name.mismatchdefault'
            repolabel = 'formatvariant.repo.mismatchdefault'
        else:
            namelabel = 'formatvariant.name.uptodate'
            repolabel = 'formatvariant.repo.uptodate'

        fm.write('name', makeformatname(fv.name), fv.name,
                 label=namelabel)
        fm.write('repo', ' %3s', formatvalue(repovalue),
                 label=repolabel)
        if fv.default != configvalue:
            configlabel = 'formatvariant.config.special'
        else:
            configlabel = 'formatvariant.config.default'
        fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
                     label=configlabel)
        fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
                     label='formatvariant.default')
        fm.plain('\n')
    fm.end()
1060
1060
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    yesno = {True: 'yes', False: 'no'}
    # one "label: value" line per detected filesystem property
    for label, value in [
        ('path', path),
        ('mounted on', util.getfsmountpoint(path) or '(unknown)'),
        ('exec', yesno[bool(util.checkexec(path))]),
        ('fstype', util.getfstype(path) or '(unknown)'),
        ('symlink', yesno[bool(util.checklink(path))]),
        ('hardlink', yesno[bool(util.checknlink(path))]),
    ]:
        ui.write(('%s: %s\n') % (label, value))
    # probe case sensitivity with a throwaway temp file; if the path is
    # not writable we simply report '(unknown)'
    casesensitive = '(unknown)'
    try:
        with pycompat.namedtempfile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = yesno[bool(util.fscasesensitive(f.name))]
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)
1077
1077
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
    ('C', 'common', [], _('id of common node'), _('ID')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    # assemble keyword arguments for the wire-protocol getbundle call;
    # the r'' prefixes keep the keys as native str, which **kwargs
    # expansion requires on Python 3
    kwargs = {}
    if common:
        kwargs[r'common'] = [bin(s) for s in common]
    if head:
        kwargs[r'heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    kwargs[r'bundlecaps'] = None
    bundle = peer.getbundle('debug', **kwargs)

    # map the user-facing compression name onto the internal bundle type
    btypes = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}
    bundletype = btypes.get(opts.get('type', 'bzip2').lower())
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1112
1112
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            # ignored: the path (file or ancestor dir) that matched;
            # ignoredata: (ignorefile, lineno, line) for that match
            ignored = None
            ignoredata = None
            if nf != '.':
                if ignore(nf):
                    # the file itself matches an ignore pattern
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    # otherwise walk ancestor directories: a file is also
                    # ignored when any containing directory is ignored
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(_("%s is ignored because of "
                               "containing directory %s\n")
                             % (uipathfn(f), ignored))
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % uipathfn(f))
1155
1155
@command('debugindex', cmdutil.debugrevlogopts + cmdutil.formatteropts,
         _('-c|-m|FILE'))
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindex', file_, opts)

    # --debug prints full hashes, otherwise the abbreviated form
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # probe the first revision to learn how wide a rendered node id is,
    # so the header columns line up; 12 is the fallback for empty stores
    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break

    fm = ui.formatter('debugindex', opts)
    fm.plain(b'   rev linkrev %s %s p2\n' % (
        b'nodeid'.ljust(idlen),
        b'p1'.ljust(idlen)))

    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        # consistently use bytes format strings (the original mixed b''
        # and plain '' literals between sibling fm.write() calls)
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
        fm.write(b'node', b'%s ', shortfn(node))
        fm.write(b'p1', b'%s ', shortfn(parents[0]))
        fm.write(b'p2', b'%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()
1191
1191
@command('debugindexdot', cmdutil.debugrevlogopts,
         _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for rev in store:
        parents = store.parents(store.node(rev))
        # one parent -> child edge per non-null parent
        ui.write("\t%d -> %d\n" % (store.rev(parents[0]), rev))
        if parents[1] != nullid:
            ui.write("\t%d -> %d\n" % (store.rev(parents[1]), rev))
    ui.write("}\n")
1206
1206
@command('debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # touch the index first (presumably to force it to be loaded/parsed
    # before we inspect it -- TODO confirm)
    repo.changelog.shortest(nullid, 1)
    index = repo.changelog.index
    # only the native (C/rust) index implementations expose stats()
    if not util.safehasattr(index, 'stats'):
        raise error.Abort(_('debugindexstats only works with native code'))
    stats = index.stats()
    for key in sorted(stats):
        ui.write('%s: %d\n' % (key, stats[key]))
1216
1216
@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    # number of problems found so far; also the command's return value
    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        # verify the configured encoding is one Python actually knows
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable or _("unknown"))
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    # TLS protocol support plus SNI availability
    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_(' TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_(' SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    # version string may carry a '+<extra>' suffix for custom builds
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    rustandc = policy.policy in ('rust+c', 'rust+c-allow')
    rustext = rustandc # for now, that's the only case
    cext = policy.policy in ('c', 'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        # try importing the compiled extensions the policy promises;
        # an ImportError here means a broken install
        err = None
        try:
            if cext:
                from .cext import (
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )
                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (
                    ancestor,
                    dirstate,
                )
                dir(ancestor), dir(dirstate) # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))
    re2 = 'missing'
    if util._re2:
        re2 = 'available'
    fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                # p is reused below as "templates are healthy" flag
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    # 'vi' is the implicit fallback editor, so a missing 'vi' gets a
    # different message than a missing explicitly-configured editor
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editorbin)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editorbin)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = stringutil.forcebytestr(e)
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    # let extensions contribute their own install checks
    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems
1401
1401
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('known'):
        raise error.Abort("known() not supported by target repository")
    # ask the peer about all ids at once, then print one digit per id
    flags = peer.known([bin(s) for s in ids])
    ui.write("%s\n" % "".join("1" if known else "0" for known in flags))
1415
1415
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # kept purely as an alias: forwards everything to debugnamecomplete
    debugnamecomplete(ui, repo, *args)
1420
1420
1421 @command('debuglocks',
@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)')),
          ('s', 'set-lock', None, _('set the store lock until stopped')),
          ('S', 'set-wlock', None,
           _('set the working state lock until stopped'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # --force-* simply deletes the lock file; DANGEROUS, as advertised.
    if opts.get(r'force_lock'):
        repo.svfs.unlink('lock')
    if opts.get(r'force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
        return 0

    # --set-*: acquire the requested lock(s) non-blockingly, hold them until
    # the user answers the prompt (or the process is interrupted).
    acquired = []
    try:
        if opts.get(r'set_wlock'):
            try:
                acquired.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_('wlock is already held'))
        if opts.get(r'set_lock'):
            try:
                acquired.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_('lock is already held'))
        if acquired:
            ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*acquired)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        """Print the state of one lock file; return 1 if held, 0 if free."""
        # attempting a non-blocking acquire causes stale locks to get
        # reaped, for more accurate reporting
        try:
            lk = method(False)
        except error.LockHeld:
            lk = None

        if lk:
            lk.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = ('user %s, process %s, host %s'
                                  % (user or b'None', pid, host))
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                # ENOENT just means the lock file is absent, i.e. free
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held
1517
1517
@command('debugmanifestfulltextcache', [
    ('', 'clear', False, _('clear the cache')),
    ('a', 'add', [], _('add the given manifest nodes to the cache'),
     _('NODE'))
    ], '')
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        # not every storage backend exposes a fulltext cache
        storage = repo.manifestlog.getstorage(b'')
        try:
            return storage._fulltextcache
        except AttributeError:
            msg = _("Current revlog implementation doesn't appear to have a "
                    "manifest fulltext cache\n")
            raise error.Abort(msg)

    if opts.get(r'clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
        return

    if add:
        with repo.wlock():
            mlog = repo.manifestlog
            store = mlog.getstorage(b'')
            for n in add:
                try:
                    manifest = mlog[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(e, hint="Check your manifest node id")
                manifest.read()  # stores revisision in cache too
        return

    # default action: dump the cache contents
    cache = getcache()
    if not len(cache):
        ui.write(_('cache empty\n'))
    else:
        ui.write(
            _('cache contains %d manifest entries, in order of most to '
              'least recent:\n') % (len(cache),))
        totalsize = 0
        for nodeid in cache:
            # peek() does not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24   # 20 bytes nodeid, 4 bytes size
            ui.write(_('id: %s, size %s\n') % (
                hex(nodeid), util.bytecount(size)))
        ondisk = cache._opener.stat('manifestfulltextcache').st_size
        ui.write(
            _('total cache data size %s, on-disk %s\n') % (
                util.bytecount(totalsize), util.bytecount(ondisk))
        )
1573
1573
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        # render the null hash as the literal string 'null'
        return 'null' if h == nullhex else h

    def printrecords(version):
        ui.write(('* version %d records\n') % version)
        records = v1records if version == 1 else v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                fields = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = fields[0:7]
                if version == 1:
                    # v1 records do not carry the "other" node
                    onode = 'not stored in v1 format'
                    flags = fields[7]
                else:
                    onode, flags = fields[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write(('  ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write(('  other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                # extras come as alternating key/value entries
                extrastrings = ['%s = %s' % (extras[i], extras[i + 1])
                                for i in range(0, len(extras), 2)]

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write(('  local: %s\n' % labels[0]))
                ui.write(('  other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write(('  base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        idx = order.find(r[0])
        if idx == -1:
            # unknown record types sort after the known ones, by payload
            return (1, r[1])
        return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
        if ui.verbose:
            printrecords(2)
1672
1672
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for nsname, ns in repo.names.iteritems():
        if nsname != 'branches':
            names.update(ns.listnames(repo))
    names.update(tag for (tag, heads, tip, closed)
                 in repo.branchmap().iterbranches() if not closed)

    # with no arguments, complete against the empty prefix (i.e. everything)
    if not args:
        args = ['']
    completions = set()
    for prefix in args:
        completions.update(n for n in names if n.startswith(prefix))
    ui.write('\n'.join(sorted(completions)))
    ui.write('\n')
1692
1692
@command('debugobsolete',
         [('', 'flags', 0, _('markers flag')),
          ('', 'record-parents', False,
           _('record parent information for the precursor')),
          ('r', 'rev', [], _('display markers relevant to REV')),
          ('', 'exclusive', False, _('restrict display to markers only '
                                     'relevant to REV')),
          ('', 'index', False, _('display index of the marker')),
          ('', 'delete', [], _('delete markers specified by indices')),
         ] + cmdutil.commitopts2 + cmdutil.formatteropts,
         _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        """Convert a hex string into a full binary node id or abort."""
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            node = bin(s)
            if len(node) != len(nullid):
                raise TypeError()
            return node
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    # --delete: remove markers by index and stop
    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            count = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % count)

        return

    if precursor is not None:
        # creation mode
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {
            'user': encoding.fromlocal(opts['user'] or ui.username()),
        }
        succs = tuple(parsenodeid(succ) for succ in successors)
        with repo.lock():
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot used --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata, ui=ui)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') %
                                  pycompat.bytestr(exc))
            finally:
                tr.release()
    else:
        # display mode
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsutil.getmarkers(repo, nodes=nodes,
                                              exclusive=opts['exclusive']))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
1809
1809
@command('debugp1copies',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV]'))
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    # default to the working directory context when no revision is given
    wctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    for dst, src in wctx.p1copies().items():
        ui.write('%s -> %s\n' % (src, dst))
1820
1820
@command('debugp2copies',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV]'))
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""
    # BUG FIX: this function was previously (mis)named debugp1copies, which
    # silently shadowed the real module-level debugp1copies defined just
    # above. The CLI is unaffected by the rename because @command registers
    # the command under the name given in the decorator ('debugp2copies'),
    # not the function's own name.
    opts = pycompat.byteskwargs(opts)
    # default to the working directory context when no revision is given
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    for dst, src in ctx.p2copies().items():
        ui.write('%s -> %s\n' % (src, dst))
1831
1831
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        """Return (files, dirs) completing *path* among dirstate entries
        whose state character is in *acceptable*."""
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        # refuse to complete outside the repository
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        # dirstate paths always use '/'; translate when the OS separator differs
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts[r'full']
        files, dirs = set(), set()
        for f, st in dirstate.iteritems():
            if not (f.startswith(spec) and st[0] in acceptable):
                continue
            if fixpaths:
                f = f.replace('/', pycompat.ossep)
            if fullpaths:
                files.add(f)
                continue
            # only complete up to the next path segment
            sep = f.find(pycompat.ossep, speclen)
            if sep >= 0:
                dirs.add(f[:sep])
            else:
                files.add(f)
        return files, dirs

    acceptable = ''
    if opts[r'normal']:
        acceptable += 'nm'
    if opts[r'added']:
        acceptable += 'a'
    if opts[r'removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        # with no state filters selected, accept every state
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')
1896
1896
@command('debugpathcopies',
         cmdutil.walkopts,
         'hg debugpathcopies REV1 REV2 [FILE]',
         inferrepo=True)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    fromctx = scmutil.revsingle(repo, rev1)
    toctx = scmutil.revsingle(repo, rev2)
    # restrict the report to files matching the given patterns (if any)
    matcher = scmutil.match(fromctx, pats, opts)
    for dst, src in sorted(copies.pathcopies(fromctx, toctx, matcher).items()):
        ui.write('%s -> %s\n' % (src, dst))
1908
1908
@command('debugpeer', [], _('PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging. Requires --debug to display
    # though.
    overrides = {
        ('devel', 'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        islocal = peer.local() is not None
        pushable = peer.canpush()

        ui.write(_('url: %s\n') % peer.url())
        ui.write(_('local: %s\n') % (_('yes') if islocal else _('no')))
        ui.write(_('pushable: %s\n') % (_('yes') if pushable else _('no')))
1927
1927
@command('debugpickmergetool',
         [('r', 'rev', '', _('check for files in this revision'), _('REV')),
          ('', 'changedelete', None, _('emulate merging change and delete')),
         ] + cmdutil.walkopts + cmdutil.mergetoolopts,
         _('[PATTERN]...'),
         inferrepo=True)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts['tool']:
        overrides[('ui', 'forcemerge')] = opts['tool']
        ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))

    with ui.configoverride(overrides, 'debugmergepatterns'):
        hgmerge = encoding.environ.get("HGMERGE")
        if hgmerge is not None:
            ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config("ui", "merge")
        if uimerge:
            ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get('rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts['changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                # Suppress _picktool()'s own warnings unless --debug was
                # given; they are restored/discarded by popbuffer below.
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(repo, ui, path,
                                                     fctx.isbinary(),
                                                     'l' in fctx.flags(),
                                                     changedelete)
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(('%s = %s\n') % (path, tool))

@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if keyinfo:
        key, old, new = keyinfo
        with target.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': namespace,
                'key': key,
                'old': old,
                'new': new,
            }).result()

        ui.status(pycompat.bytestr(r) + '\n')
        # Shell convention: 0 on success, so invert the boolean result.
        return not r
    else:
        for k, v in sorted(target.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (stringutil.escapestr(k),
                                   stringutil.escapestr(v)))

@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    """compare the parent vectors of two revisions (debug helper)"""
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        # NOTE(review): if none of the four comparisons holds, `rel` is
        # unbound below; presumably pvec guarantees one always does —
        # confirm against the pvec module.
        rel = "|"
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))

@command('debugrebuilddirstate|debugrebuildstate',
         [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
          ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                                  'the working copy parent')),
         ],
         _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get(r'minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            # Keep dirstate-only files marked as added ('a'): they are
            # legitimate pending adds, not inconsistencies.
            dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
            changedfiles = manifestonly | dsnotadded

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)

@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    repair.rebuildfncache(ui, repo)

@command('debugrename',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV] [FILE]...'))
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    m = scmutil.match(ctx, pats, opts)
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        # renamed() returns (oldpath, oldnode) or a false value when the
        # file was not copied/renamed at this revision.
        o = fctx.filelog().renamed(fctx.filenode())
        rel = repo.pathto(abs)
        if o:
            ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
        else:
            ui.write(_("%s not renamed\n") % rel)

@command('debugrevlog', cmdutil.debugrevlogopts +
         [('d', 'dump', False, _('dump index data'))],
         _('-c|-m|FILE'),
         optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    if opts.get("dump"):
        # Raw per-revision dump mode: one line per revision, then exit.
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
                  " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # Track current DAG heads: parents of rev are no longer heads.
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append('inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks ways the "delta" are build
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about delta chain of each revs
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision; each list is [min, max, total].
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        # Fold `size` into a [min, max, total] accumulator in place.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # Stored as full text (or empty text): starts a new chain.
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            # Stored as a delta: extend the delta chain of its base.
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, '_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            # First byte of the chunk identifies its compression engine.
            chunktype = bytes(segment[0:1])
        else:
            chunktype = 'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # Turn totals into averages (guarding each divisor against zero).
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    if numfull == 0:
        fullsize[2] = 0
    else:
        fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        # Decimal format sized to the widest value to be printed.
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        # "value (percent%)" format, optionally padded for nesting.
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.write(('format : %d\n') % format)
    ui.write(('flags : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' empty : ') + fmt % pcfmt(numempty, numrevs))
    ui.write((' text : ')
             + fmt % pcfmt(numemptytext, numemptytext + numemptydelta))
    ui.write((' delta : ')
             + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta))
    ui.write((' snapshot : ') + fmt % pcfmt(numfull + numsemi, numrevs))
    for depth in sorted(numsnapdepth):
        ui.write((' lvl-%-3d : ' % depth)
                 + fmt % pcfmt(numsnapdepth[depth], numrevs))
    ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write((' snapshot : ')
             + fmt % pcfmt(fulltotal + semitotal, totalsize))
    for depth in sorted(numsnapdepth):
        ui.write((' lvl-%-3d : ' % depth)
                 + fmt % pcfmt(snaptotal[depth], totalsize))
    ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        if chunktype == 'empty':
            return ' %s : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return ' 0x%s : ' % hex(chunktype)

    ui.write('\n')
    ui.write(('chunks : ') + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.write(('chunks size : ') + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.write(('avg chain length : ') + fmt % avgchainlen)
    ui.write(('max chain length : ') + fmt % maxchainlen)
    ui.write(('max chain reach : ') + fmt % maxchainspan)
    ui.write(('compression ratio : ') + fmt % compratio)

    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('inter-snapshot size (min/max/avg) : %d / %d / %d\n')
                 % tuple(semisize))
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.write((' level-%-3d (min/max/avg) : %d / %d / %d\n')
                     % ((depth,) + tuple(snapsizedepth[depth])))
        ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
                                                            numprev))
            ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
                                                            numprev))
            ui.write((' other : ') + fmt2 % pcfmt(numoprev,
                                                  numprev))
        if gdelta:
            ui.write(('deltas against p1 : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2 : ')
                     + fmt % pcfmt(nump2, numdeltas))
        ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                           numdeltas))

@command('debugrevlogindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index

    Two index layouts are supported, selected with -f/--format:
    0 (the default) and 1. Verbose mode adds offset/length/size columns;
    debug mode prints full hex node ids instead of short ones.
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlogindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    # full hex ids under --debug, abbreviated ids otherwise
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        # every rendered id has the same width, so measuring the first
        # node is enough to size the id columns
        idlen = len(shortfn(r.node(i)))
        break

    # print the column header matching the chosen format/verbosity
    if format == 0:
        if ui.verbose:
            ui.write((" rev offset length linkrev"
                      " %s %s p2\n") % ("nodeid".ljust(idlen),
                                        "p1".ljust(idlen)))
        else:
            ui.write((" rev linkrev %s %s p2\n") % (
                "nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        if ui.verbose:
            ui.write((" rev flag offset length size link p1"
                      " p2 %s\n") % "nodeid".rjust(idlen))
        else:
            ui.write((" rev flag size link p1 p2 %s\n") %
                     "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # tolerate unresolvable parents and fall back to null ids
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
            else:
                ui.write("% 6d % 7d %s %s %s\n" % (
                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
                    shortfn(pp[1])))
        elif format == 1:
            # format 1 reports parents as revision numbers, not node ids
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    r.linkrev(i), pr[0], pr[1], shortfn(node)))
            else:
                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
                    shortfn(node)))
2484
2484
@command('debugrevspec',
    [('', 'optimize', None,
      _('print parsed tree after optimizing (DEPRECATED)')),
     ('', 'show-revs', True, _('print list of result revisions (default)')),
     ('s', 'show-set', None, _('print internal representation of result set')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME')),
     ('', 'no-optimized', False, _('evaluate tree without optimization')),
     ('', 'verify-optimized', False, _('verify optimized result')),
     ],
    ('REVSPEC'))
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems('revsetalias')
    # transformation pipeline: each stage takes the tree produced by the
    # previous one and rewrites it; order matters
    stages = [
        ('parsed', lambda tree: tree),
        ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
                                                           ui.warn)),
        ('concatenated', revsetlang.foldconcat),
        ('analyzed', revsetlang.analyze),
        ('optimized', revsetlang.optimize),
    ]
    if opts['no_optimized']:
        # drop the final 'optimized' stage from the pipeline
        stages = stages[:-1]
    if opts['verify_optimized'] and opts['no_optimized']:
        raise error.Abort(_('cannot use --verify-optimized with '
                            '--no-optimized'))
    stagenames = set(n for n, f in stages)

    # which stage trees get printed: 'showalways' unconditionally,
    # 'showchanged' only when the stage actually altered the tree
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
        showchanged.update(['expanded', 'concatenated'])
    if opts['optimize']:
        showalways.add('optimized')
    if opts['show_stage'] and opts['optimize']:
        raise error.Abort(_('cannot use --optimize with --show-stage'))
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    # run the pipeline, remembering each intermediate tree so
    # --verify-optimized can replay specific stages afterwards
    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(revsetlang.prettyformat(tree), "\n")
            printedtree = tree

    if opts['verify_optimized']:
        # evaluate both the pre- and post-optimization trees and diff the
        # resulting revision sequences; any difference is an optimizer bug
        arevs = revset.makematcher(treebystage['analyzed'])(repo)
        brevs = revset.makematcher(treebystage['optimized'])(repo)
        if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
            ui.write(("* analyzed set:\n"), stringutil.prettyrepr(arevs), "\n")
            ui.write(("* optimized set:\n"), stringutil.prettyrepr(brevs), "\n")
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        # unified-diff style report of where the two sets diverge
        ui.write(('--- analyzed\n'), label='diff.file_a')
        ui.write(('+++ optimized\n'), label='diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in (r'delete', r'replace'):
                for c in arevs[alo:ahi]:
                    ui.write('-%d\n' % c, label='diff.deleted')
            if tag in (r'insert', r'replace'):
                for c in brevs[blo:bhi]:
                    ui.write('+%d\n' % c, label='diff.inserted')
            if tag == r'equal':
                for c in arevs[alo:ahi]:
                    ui.write(' %d\n' % c)
        return 1

    # normal path: evaluate the fully-transformed tree
    func = revset.makematcher(tree)
    revs = func(repo)
    if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
        ui.write(("* set:\n"), stringutil.prettyrepr(revs), "\n")
    if not opts['show_revs']:
        return
    for c in revs:
        ui.write("%d\n" % c)
2587
2587
@command('debugserve', [
    ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
    ('', 'logiofd', '', _('file descriptor to log server I/O to')),
    ('', 'logiofile', '', _('file to log server I/O to')),
], '')
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts['sshstdio']:
        raise error.Abort(_('only --sshstdio is currently supported'))

    # stays None when no logging destination was requested
    logfh = None

    # --logiofd and --logiofile are mutually exclusive destinations
    if opts['logiofd'] and opts['logiofile']:
        raise error.Abort(_('cannot use both --logiofd and --logiofile'))

    if opts['logiofd']:
        # Line buffered because output is line based.
        try:
            logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts['logiofd']), r'wb', 1)
    elif opts['logiofile']:
        # line-buffered append, same as the fd variant above
        logfh = open(opts['logiofile'], 'ab', 1)

    # serve_forever() blocks; this command does not return normally
    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()
2624
2624
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    Primarily useful for repository-conversion tools; handle with care.
    Neither the working directory nor the dirstate is updated, so file
    status may be incorrect after running this command.

    Returns 0 on success.
    """
    # Resolve both revisions first; an omitted second parent falls back
    # to the null revision.
    parents = (scmutil.revsingle(repo, rev1).node(),
               scmutil.revsingle(repo, rev2, 'null').node())

    with repo.wlock():
        repo.setparents(*parents)
2642
2642
@command('debugssl', [], '[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    '''test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    '''
    if not pycompat.iswindows:
        raise error.Abort(_('certificate chain building is only possible on '
                            'Windows'))

    if not source:
        if not repo:
            raise error.Abort(_("there is no Mercurial repository here, and no "
                                "server specified"))
        source = "default"

    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)

    # only schemes with a well-known TLS/SSH port are probed
    defaultport = {'https': 443, 'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_("malformed port number in URL"))
    else:
        raise error.Abort(_("only https and ssh connections are supported"))

    # Windows-only helper; imported lazily since this path is unreachable
    # on other platforms (guarded above)
    from . import win32

    # certificate verification is deliberately disabled (CERT_NONE): we
    # only want the raw peer certificate, not a validated connection
    s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
                        cert_reqs=ssl.CERT_NONE, ca_certs=None)

    try:
        s.connect(addr)
        cert = s.getpeercert(True)

        ui.status(_('checking the certificate chain for %s\n') % url.host)

        # first pass only inspects the chain (build=False)
        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_('certificate chain is incomplete, updating... '))

            # second call uses the default build mode — presumably it
            # attempts the repair via Windows Update; confirm against
            # win32.checkcertificatechain
            if not win32.checkcertificatechain(cert):
                ui.status(_('failed.\n'))
            else:
                ui.status(_('done.\n'))
        else:
            ui.status(_('full certificate chain is available\n'))
    finally:
        s.close()
2703
2703
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    """dump the subrepository state recorded in a revision"""
    # Emit path/source/revision for each subrepo entry, sorted by path
    # for stable output.
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.write(('path %s\n') % path)
        ui.write((' source %s\n') % state[0])
        ui.write((' revision %s\n') % state[1])
2714
2714
@command('debugsuccessorssets',
    [('', 'closest', False, _('return closest successors sets only'))],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    # rendering helpers: a context prints as its bytes form, a node as its
    # short hex form
    ctx2str = bytes
    node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n'% ctx2str(ctx))
        for succsset in obsutil.successorssets(repo, ctx.node(),
                                               closest=opts[r'closest'],
                                               cache=cache):
            # each successors set is printed on its own indented line
            if succsset:
                ui.write(' ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(' ')
                    ui.write(node2str(node))
            ui.write('\n')
2767
2767
@command('debugtemplate',
    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts[r'rev']:
        # log-template mode requires an actual repository
        if repo is None:
            raise error.RepoError(_('there is no Mercurial repository here '
                                    '(.hg not found)'))
        revs = scmutil.revrange(repo, opts[r'rev'])

    # parse -D KEY=VALUE definitions into template properties
    props = {}
    for d in opts[r'define']:
        try:
            k, v = (e.strip() for e in d.split('=', 1))
            # empty keys and the name 'ui' are rejected
            if not k or k == 'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            # also reached when '=' is missing (split yields one element)
            raise error.Abort(_('malformed keyword definition: %s') % d)

    if ui.verbose:
        # show the parse tree, and the alias-expanded tree if it differs
        aliases = ui.configitems('templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), '\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')

    if revs is None:
        # generic template: render once with the user-supplied properties
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        # log template: render once per requested changeset
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
2824
2824
@command('debuguigetpass', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguigetpass(ui, prompt=''):
    """show prompt to type password

    Reads a password via ui.getpass() and echoes it back, so the
    interactive password-prompt machinery can be exercised in isolation.
    """
    r = ui.getpass(prompt)
    # Fix: the label was misspelled 'respose'; use 'response' to match
    # the sibling debuguiprompt command's output.
    ui.write(('response: %s\n') % r)
2832
2832
@command('debuguiprompt', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguiprompt(ui, prompt=''):
    """show plain prompt"""
    # Ask via ui.prompt() and echo the answer back on stdout.
    answer = ui.prompt(prompt)
    ui.write(('response: %s\n') % answer)
2840
2840
@command('debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Hold both repository locks while rebuilding every known cache
    # (full=True).
    with repo.wlock():
        with repo.lock():
            repo.updatecaches(full=True)
2846
2846
@command('debugupgraderepo', [
    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
    ('', 'run', False, _('performs an upgrade')),
    ('', 'backup', True, _('keep the old repository content around')),
    ('', 'changelog', None, _('select the changelog for upgrade')),
    ('', 'manifest', None, _('select the manifest for upgrade')),
])
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlog will be upgraded. You can restrict this using flag
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlog but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    """
    # All of the real work lives in the upgrade module; revlog-selection
    # flags such as --changelog/--manifest travel through **opts.
    return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize,
                               backup=backup, **opts)
2880
2883
@command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
         inferrepo=True)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    # By default paths are printed as-is; with ui.slash on non-'/' platforms
    # we normalize separators for display.
    f = lambda fn: fn
    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
        f = lambda fn: util.normpath(fn)
    # Column widths are sized to the longest repo-absolute and relative path.
    # (Was: local named ``abs`` shadowing the builtin, and list comprehensions
    # materialized just to feed max().)
    fmt = 'f %%-%ds %%-%ds %%s' % (
        max(len(path) for path in items),
        max(len(repo.pathto(path)) for path in items))
    for path in items:
        line = fmt % (path, f(repo.pathto(path)),
                      m.exact(path) and 'exact' or '')
        ui.write("%s\n" % line.rstrip())
2901
2904
@command('debugwhyunstable', [], _('REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    # One output line per instability entry reported by obsutil.
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        divergent = entry.get('divergentnodes')
        if divergent:
            pairs = ('%s (%s)' % (ctx.hex(), ctx.phasestr())
                     for ctx in entry['divergentnodes'])
            dnodes = ' '.join(pairs) + ' '
        else:
            dnodes = ''
        ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
                                    entry['reason'], entry['node']))
2912
2915
@command('debugwireargs',
    [('', 'three', '', 'three'),
    ('', 'four', '', 'four'),
    ('', 'five', '', 'five'),
    ] + cmdutil.remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True)
def debugwireargs(ui, repopath, *vals, **opts):
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    # Strip the generic remote options; only the debug flags remain.
    for opt in cmdutil.remoteopts:
        del opts[opt[1]]
    # Forward only the options that were actually set.
    args = {}
    for key, value in opts.iteritems():
        if value:
            args[key] = value
    args = pycompat.strkwargs(args)
    # run twice to check that we don't mess up the stream for the next command
    res1 = repo.debugwireargs(*vals, **args)
    res2 = repo.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)
2936
2939
2937 def _parsewirelangblocks(fh):
2940 def _parsewirelangblocks(fh):
2938 activeaction = None
2941 activeaction = None
2939 blocklines = []
2942 blocklines = []
2940 lastindent = 0
2943 lastindent = 0
2941
2944
2942 for line in fh:
2945 for line in fh:
2943 line = line.rstrip()
2946 line = line.rstrip()
2944 if not line:
2947 if not line:
2945 continue
2948 continue
2946
2949
2947 if line.startswith(b'#'):
2950 if line.startswith(b'#'):
2948 continue
2951 continue
2949
2952
2950 if not line.startswith(b' '):
2953 if not line.startswith(b' '):
2951 # New block. Flush previous one.
2954 # New block. Flush previous one.
2952 if activeaction:
2955 if activeaction:
2953 yield activeaction, blocklines
2956 yield activeaction, blocklines
2954
2957
2955 activeaction = line
2958 activeaction = line
2956 blocklines = []
2959 blocklines = []
2957 lastindent = 0
2960 lastindent = 0
2958 continue
2961 continue
2959
2962
2960 # Else we start with an indent.
2963 # Else we start with an indent.
2961
2964
2962 if not activeaction:
2965 if not activeaction:
2963 raise error.Abort(_('indented line outside of block'))
2966 raise error.Abort(_('indented line outside of block'))
2964
2967
2965 indent = len(line) - len(line.lstrip())
2968 indent = len(line) - len(line.lstrip())
2966
2969
2967 # If this line is indented more than the last line, concatenate it.
2970 # If this line is indented more than the last line, concatenate it.
2968 if indent > lastindent and blocklines:
2971 if indent > lastindent and blocklines:
2969 blocklines[-1] += line.lstrip()
2972 blocklines[-1] += line.lstrip()
2970 else:
2973 else:
2971 blocklines.append(line)
2974 blocklines.append(line)
2972 lastindent = indent
2975 lastindent = indent
2973
2976
2974 # Flush last block.
2977 # Flush last block.
2975 if activeaction:
2978 if activeaction:
2976 yield activeaction, blocklines
2979 yield activeaction, blocklines
2977
2980
2978 @command('debugwireproto',
2981 @command('debugwireproto',
2979 [
2982 [
2980 ('', 'localssh', False, _('start an SSH server for this repo')),
2983 ('', 'localssh', False, _('start an SSH server for this repo')),
2981 ('', 'peer', '', _('construct a specific version of the peer')),
2984 ('', 'peer', '', _('construct a specific version of the peer')),
2982 ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
2985 ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
2983 ('', 'nologhandshake', False,
2986 ('', 'nologhandshake', False,
2984 _('do not log I/O related to the peer handshake')),
2987 _('do not log I/O related to the peer handshake')),
2985 ] + cmdutil.remoteopts,
2988 ] + cmdutil.remoteopts,
2986 _('[PATH]'),
2989 _('[PATH]'),
2987 optionalrepo=True)
2990 optionalrepo=True)
2988 def debugwireproto(ui, repo, path=None, **opts):
2991 def debugwireproto(ui, repo, path=None, **opts):
2989 """send wire protocol commands to a server
2992 """send wire protocol commands to a server
2990
2993
2991 This command can be used to issue wire protocol commands to remote
2994 This command can be used to issue wire protocol commands to remote
2992 peers and to debug the raw data being exchanged.
2995 peers and to debug the raw data being exchanged.
2993
2996
2994 ``--localssh`` will start an SSH server against the current repository
2997 ``--localssh`` will start an SSH server against the current repository
2995 and connect to that. By default, the connection will perform a handshake
2998 and connect to that. By default, the connection will perform a handshake
2996 and establish an appropriate peer instance.
2999 and establish an appropriate peer instance.
2997
3000
2998 ``--peer`` can be used to bypass the handshake protocol and construct a
3001 ``--peer`` can be used to bypass the handshake protocol and construct a
2999 peer instance using the specified class type. Valid values are ``raw``,
3002 peer instance using the specified class type. Valid values are ``raw``,
3000 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
3003 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
3001 raw data payloads and don't support higher-level command actions.
3004 raw data payloads and don't support higher-level command actions.
3002
3005
3003 ``--noreadstderr`` can be used to disable automatic reading from stderr
3006 ``--noreadstderr`` can be used to disable automatic reading from stderr
3004 of the peer (for SSH connections only). Disabling automatic reading of
3007 of the peer (for SSH connections only). Disabling automatic reading of
3005 stderr is useful for making output more deterministic.
3008 stderr is useful for making output more deterministic.
3006
3009
3007 Commands are issued via a mini language which is specified via stdin.
3010 Commands are issued via a mini language which is specified via stdin.
3008 The language consists of individual actions to perform. An action is
3011 The language consists of individual actions to perform. An action is
3009 defined by a block. A block is defined as a line with no leading
3012 defined by a block. A block is defined as a line with no leading
3010 space followed by 0 or more lines with leading space. Blocks are
3013 space followed by 0 or more lines with leading space. Blocks are
3011 effectively a high-level command with additional metadata.
3014 effectively a high-level command with additional metadata.
3012
3015
3013 Lines beginning with ``#`` are ignored.
3016 Lines beginning with ``#`` are ignored.
3014
3017
3015 The following sections denote available actions.
3018 The following sections denote available actions.
3016
3019
3017 raw
3020 raw
3018 ---
3021 ---
3019
3022
3020 Send raw data to the server.
3023 Send raw data to the server.
3021
3024
3022 The block payload contains the raw data to send as one atomic send
3025 The block payload contains the raw data to send as one atomic send
3023 operation. The data may not actually be delivered in a single system
3026 operation. The data may not actually be delivered in a single system
3024 call: it depends on the abilities of the transport being used.
3027 call: it depends on the abilities of the transport being used.
3025
3028
3026 Each line in the block is de-indented and concatenated. Then, that
3029 Each line in the block is de-indented and concatenated. Then, that
3027 value is evaluated as a Python b'' literal. This allows the use of
3030 value is evaluated as a Python b'' literal. This allows the use of
3028 backslash escaping, etc.
3031 backslash escaping, etc.
3029
3032
3030 raw+
3033 raw+
3031 ----
3034 ----
3032
3035
3033 Behaves like ``raw`` except flushes output afterwards.
3036 Behaves like ``raw`` except flushes output afterwards.
3034
3037
3035 command <X>
3038 command <X>
3036 -----------
3039 -----------
3037
3040
3038 Send a request to run a named command, whose name follows the ``command``
3041 Send a request to run a named command, whose name follows the ``command``
3039 string.
3042 string.
3040
3043
3041 Arguments to the command are defined as lines in this block. The format of
3044 Arguments to the command are defined as lines in this block. The format of
3042 each line is ``<key> <value>``. e.g.::
3045 each line is ``<key> <value>``. e.g.::
3043
3046
3044 command listkeys
3047 command listkeys
3045 namespace bookmarks
3048 namespace bookmarks
3046
3049
3047 If the value begins with ``eval:``, it will be interpreted as a Python
3050 If the value begins with ``eval:``, it will be interpreted as a Python
3048 literal expression. Otherwise values are interpreted as Python b'' literals.
3051 literal expression. Otherwise values are interpreted as Python b'' literals.
3049 This allows sending complex types and encoding special byte sequences via
3052 This allows sending complex types and encoding special byte sequences via
3050 backslash escaping.
3053 backslash escaping.
3051
3054
3052 The following arguments have special meaning:
3055 The following arguments have special meaning:
3053
3056
3054 ``PUSHFILE``
3057 ``PUSHFILE``
3055 When defined, the *push* mechanism of the peer will be used instead
3058 When defined, the *push* mechanism of the peer will be used instead
3056 of the static request-response mechanism and the content of the
3059 of the static request-response mechanism and the content of the
3057 file specified in the value of this argument will be sent as the
3060 file specified in the value of this argument will be sent as the
3058 command payload.
3061 command payload.
3059
3062
3060 This can be used to submit a local bundle file to the remote.
3063 This can be used to submit a local bundle file to the remote.
3061
3064
3062 batchbegin
3065 batchbegin
3063 ----------
3066 ----------
3064
3067
3065 Instruct the peer to begin a batched send.
3068 Instruct the peer to begin a batched send.
3066
3069
3067 All ``command`` blocks are queued for execution until the next
3070 All ``command`` blocks are queued for execution until the next
3068 ``batchsubmit`` block.
3071 ``batchsubmit`` block.
3069
3072
3070 batchsubmit
3073 batchsubmit
3071 -----------
3074 -----------
3072
3075
3073 Submit previously queued ``command`` blocks as a batch request.
3076 Submit previously queued ``command`` blocks as a batch request.
3074
3077
3075 This action MUST be paired with a ``batchbegin`` action.
3078 This action MUST be paired with a ``batchbegin`` action.
3076
3079
3077 httprequest <method> <path>
3080 httprequest <method> <path>
3078 ---------------------------
3081 ---------------------------
3079
3082
3080 (HTTP peer only)
3083 (HTTP peer only)
3081
3084
3082 Send an HTTP request to the peer.
3085 Send an HTTP request to the peer.
3083
3086
3084 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
3087 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
3085
3088
3086 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
3089 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
3087 headers to add to the request. e.g. ``Accept: foo``.
3090 headers to add to the request. e.g. ``Accept: foo``.
3088
3091
3089 The following arguments are special:
3092 The following arguments are special:
3090
3093
3091 ``BODYFILE``
3094 ``BODYFILE``
3092 The content of the file defined as the value to this argument will be
3095 The content of the file defined as the value to this argument will be
3093 transferred verbatim as the HTTP request body.
3096 transferred verbatim as the HTTP request body.
3094
3097
3095 ``frame <type> <flags> <payload>``
3098 ``frame <type> <flags> <payload>``
3096 Send a unified protocol frame as part of the request body.
3099 Send a unified protocol frame as part of the request body.
3097
3100
3098 All frames will be collected and sent as the body to the HTTP
3101 All frames will be collected and sent as the body to the HTTP
3099 request.
3102 request.
3100
3103
3101 close
3104 close
3102 -----
3105 -----
3103
3106
3104 Close the connection to the server.
3107 Close the connection to the server.
3105
3108
3106 flush
3109 flush
3107 -----
3110 -----
3108
3111
3109 Flush data written to the server.
3112 Flush data written to the server.
3110
3113
3111 readavailable
3114 readavailable
3112 -------------
3115 -------------
3113
3116
3114 Close the write end of the connection and read all available data from
3117 Close the write end of the connection and read all available data from
3115 the server.
3118 the server.
3116
3119
3117 If the connection to the server encompasses multiple pipes, we poll both
3120 If the connection to the server encompasses multiple pipes, we poll both
3118 pipes and read available data.
3121 pipes and read available data.
3119
3122
3120 readline
3123 readline
3121 --------
3124 --------
3122
3125
3123 Read a line of output from the server. If there are multiple output
3126 Read a line of output from the server. If there are multiple output
3124 pipes, reads only the main pipe.
3127 pipes, reads only the main pipe.
3125
3128
3126 ereadline
3129 ereadline
3127 ---------
3130 ---------
3128
3131
3129 Like ``readline``, but read from the stderr pipe, if available.
3132 Like ``readline``, but read from the stderr pipe, if available.
3130
3133
3131 read <X>
3134 read <X>
3132 --------
3135 --------
3133
3136
3134 ``read()`` N bytes from the server's main output pipe.
3137 ``read()`` N bytes from the server's main output pipe.
3135
3138
3136 eread <X>
3139 eread <X>
3137 ---------
3140 ---------
3138
3141
3139 ``read()`` N bytes from the server's stderr pipe, if available.
3142 ``read()`` N bytes from the server's stderr pipe, if available.
3140
3143
3141 Specifying Unified Frame-Based Protocol Frames
3144 Specifying Unified Frame-Based Protocol Frames
3142 ----------------------------------------------
3145 ----------------------------------------------
3143
3146
3144 It is possible to emit a *Unified Frame-Based Protocol* by using special
3147 It is possible to emit a *Unified Frame-Based Protocol* by using special
3145 syntax.
3148 syntax.
3146
3149
3147 A frame is composed as a type, flags, and payload. These can be parsed
3150 A frame is composed as a type, flags, and payload. These can be parsed
3148 from a string of the form:
3151 from a string of the form:
3149
3152
3150 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
3153 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
3151
3154
3152 ``request-id`` and ``stream-id`` are integers defining the request and
3155 ``request-id`` and ``stream-id`` are integers defining the request and
3153 stream identifiers.
3156 stream identifiers.
3154
3157
3155 ``type`` can be an integer value for the frame type or the string name
3158 ``type`` can be an integer value for the frame type or the string name
3156 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
3159 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
3157 ``command-name``.
3160 ``command-name``.
3158
3161
3159 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
3162 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
3160 components. Each component (and there can be just one) can be an integer
3163 components. Each component (and there can be just one) can be an integer
3161 or a flag name for stream flags or frame flags, respectively. Values are
3164 or a flag name for stream flags or frame flags, respectively. Values are
3162 resolved to integers and then bitwise OR'd together.
3165 resolved to integers and then bitwise OR'd together.
3163
3166
3164 ``payload`` represents the raw frame payload. If it begins with
3167 ``payload`` represents the raw frame payload. If it begins with
3165 ``cbor:``, the following string is evaluated as Python code and the
3168 ``cbor:``, the following string is evaluated as Python code and the
3166 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
3169 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
3167 as a Python byte string literal.
3170 as a Python byte string literal.
3168 """
3171 """
3169 opts = pycompat.byteskwargs(opts)
3172 opts = pycompat.byteskwargs(opts)
3170
3173
3171 if opts['localssh'] and not repo:
3174 if opts['localssh'] and not repo:
3172 raise error.Abort(_('--localssh requires a repository'))
3175 raise error.Abort(_('--localssh requires a repository'))
3173
3176
3174 if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
3177 if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
3175 raise error.Abort(_('invalid value for --peer'),
3178 raise error.Abort(_('invalid value for --peer'),
3176 hint=_('valid values are "raw", "ssh1", and "ssh2"'))
3179 hint=_('valid values are "raw", "ssh1", and "ssh2"'))
3177
3180
3178 if path and opts['localssh']:
3181 if path and opts['localssh']:
3179 raise error.Abort(_('cannot specify --localssh with an explicit '
3182 raise error.Abort(_('cannot specify --localssh with an explicit '
3180 'path'))
3183 'path'))
3181
3184
3182 if ui.interactive():
3185 if ui.interactive():
3183 ui.write(_('(waiting for commands on stdin)\n'))
3186 ui.write(_('(waiting for commands on stdin)\n'))
3184
3187
3185 blocks = list(_parsewirelangblocks(ui.fin))
3188 blocks = list(_parsewirelangblocks(ui.fin))
3186
3189
3187 proc = None
3190 proc = None
3188 stdin = None
3191 stdin = None
3189 stdout = None
3192 stdout = None
3190 stderr = None
3193 stderr = None
3191 opener = None
3194 opener = None
3192
3195
3193 if opts['localssh']:
3196 if opts['localssh']:
3194 # We start the SSH server in its own process so there is process
3197 # We start the SSH server in its own process so there is process
3195 # separation. This prevents a whole class of potential bugs around
3198 # separation. This prevents a whole class of potential bugs around
3196 # shared state from interfering with server operation.
3199 # shared state from interfering with server operation.
3197 args = procutil.hgcmd() + [
3200 args = procutil.hgcmd() + [
3198 '-R', repo.root,
3201 '-R', repo.root,
3199 'debugserve', '--sshstdio',
3202 'debugserve', '--sshstdio',
3200 ]
3203 ]
3201 proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
3204 proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
3202 stdin=subprocess.PIPE,
3205 stdin=subprocess.PIPE,
3203 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
3206 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
3204 bufsize=0)
3207 bufsize=0)
3205
3208
3206 stdin = proc.stdin
3209 stdin = proc.stdin
3207 stdout = proc.stdout
3210 stdout = proc.stdout
3208 stderr = proc.stderr
3211 stderr = proc.stderr
3209
3212
3210 # We turn the pipes into observers so we can log I/O.
3213 # We turn the pipes into observers so we can log I/O.
3211 if ui.verbose or opts['peer'] == 'raw':
3214 if ui.verbose or opts['peer'] == 'raw':
3212 stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
3215 stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
3213 logdata=True)
3216 logdata=True)
3214 stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
3217 stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
3215 logdata=True)
3218 logdata=True)
3216 stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
3219 stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
3217 logdata=True)
3220 logdata=True)
3218
3221
3219 # --localssh also implies the peer connection settings.
3222 # --localssh also implies the peer connection settings.
3220
3223
3221 url = 'ssh://localserver'
3224 url = 'ssh://localserver'
3222 autoreadstderr = not opts['noreadstderr']
3225 autoreadstderr = not opts['noreadstderr']
3223
3226
3224 if opts['peer'] == 'ssh1':
3227 if opts['peer'] == 'ssh1':
3225 ui.write(_('creating ssh peer for wire protocol version 1\n'))
3228 ui.write(_('creating ssh peer for wire protocol version 1\n'))
3226 peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
3229 peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
3227 None, autoreadstderr=autoreadstderr)
3230 None, autoreadstderr=autoreadstderr)
3228 elif opts['peer'] == 'ssh2':
3231 elif opts['peer'] == 'ssh2':
3229 ui.write(_('creating ssh peer for wire protocol version 2\n'))
3232 ui.write(_('creating ssh peer for wire protocol version 2\n'))
3230 peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
3233 peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
3231 None, autoreadstderr=autoreadstderr)
3234 None, autoreadstderr=autoreadstderr)
3232 elif opts['peer'] == 'raw':
3235 elif opts['peer'] == 'raw':
3233 ui.write(_('using raw connection to peer\n'))
3236 ui.write(_('using raw connection to peer\n'))
3234 peer = None
3237 peer = None
3235 else:
3238 else:
3236 ui.write(_('creating ssh peer from handshake results\n'))
3239 ui.write(_('creating ssh peer from handshake results\n'))
3237 peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
3240 peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
3238 autoreadstderr=autoreadstderr)
3241 autoreadstderr=autoreadstderr)
3239
3242
3240 elif path:
3243 elif path:
3241 # We bypass hg.peer() so we can proxy the sockets.
3244 # We bypass hg.peer() so we can proxy the sockets.
3242 # TODO consider not doing this because we skip
3245 # TODO consider not doing this because we skip
3243 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
3246 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
3244 u = util.url(path)
3247 u = util.url(path)
3245 if u.scheme != 'http':
3248 if u.scheme != 'http':
3246 raise error.Abort(_('only http:// paths are currently supported'))
3249 raise error.Abort(_('only http:// paths are currently supported'))
3247
3250
3248 url, authinfo = u.authinfo()
3251 url, authinfo = u.authinfo()
3249 openerargs = {
3252 openerargs = {
3250 r'useragent': b'Mercurial debugwireproto',
3253 r'useragent': b'Mercurial debugwireproto',
3251 }
3254 }
3252
3255
3253 # Turn pipes/sockets into observers so we can log I/O.
3256 # Turn pipes/sockets into observers so we can log I/O.
3254 if ui.verbose:
3257 if ui.verbose:
3255 openerargs.update({
3258 openerargs.update({
3256 r'loggingfh': ui,
3259 r'loggingfh': ui,
3257 r'loggingname': b's',
3260 r'loggingname': b's',
3258 r'loggingopts': {
3261 r'loggingopts': {
3259 r'logdata': True,
3262 r'logdata': True,
3260 r'logdataapis': False,
3263 r'logdataapis': False,
3261 },
3264 },
3262 })
3265 })
3263
3266
3264 if ui.debugflag:
3267 if ui.debugflag:
3265 openerargs[r'loggingopts'][r'logdataapis'] = True
3268 openerargs[r'loggingopts'][r'logdataapis'] = True
3266
3269
3267 # Don't send default headers when in raw mode. This allows us to
3270 # Don't send default headers when in raw mode. This allows us to
3268 # bypass most of the behavior of our URL handling code so we can
3271 # bypass most of the behavior of our URL handling code so we can
3269 # have near complete control over what's sent on the wire.
3272 # have near complete control over what's sent on the wire.
3270 if opts['peer'] == 'raw':
3273 if opts['peer'] == 'raw':
3271 openerargs[r'sendaccept'] = False
3274 openerargs[r'sendaccept'] = False
3272
3275
3273 opener = urlmod.opener(ui, authinfo, **openerargs)
3276 opener = urlmod.opener(ui, authinfo, **openerargs)
3274
3277
3275 if opts['peer'] == 'http2':
3278 if opts['peer'] == 'http2':
3276 ui.write(_('creating http peer for wire protocol version 2\n'))
3279 ui.write(_('creating http peer for wire protocol version 2\n'))
3277 # We go through makepeer() because we need an API descriptor for
3280 # We go through makepeer() because we need an API descriptor for
3278 # the peer instance to be useful.
3281 # the peer instance to be useful.
3279 with ui.configoverride({
3282 with ui.configoverride({
3280 ('experimental', 'httppeer.advertise-v2'): True}):
3283 ('experimental', 'httppeer.advertise-v2'): True}):
3281 if opts['nologhandshake']:
3284 if opts['nologhandshake']:
3282 ui.pushbuffer()
3285 ui.pushbuffer()
3283
3286
3284 peer = httppeer.makepeer(ui, path, opener=opener)
3287 peer = httppeer.makepeer(ui, path, opener=opener)
3285
3288
3286 if opts['nologhandshake']:
3289 if opts['nologhandshake']:
3287 ui.popbuffer()
3290 ui.popbuffer()
3288
3291
3289 if not isinstance(peer, httppeer.httpv2peer):
3292 if not isinstance(peer, httppeer.httpv2peer):
3290 raise error.Abort(_('could not instantiate HTTP peer for '
3293 raise error.Abort(_('could not instantiate HTTP peer for '
3291 'wire protocol version 2'),
3294 'wire protocol version 2'),
3292 hint=_('the server may not have the feature '
3295 hint=_('the server may not have the feature '
3293 'enabled or is not allowing this '
3296 'enabled or is not allowing this '
3294 'client version'))
3297 'client version'))
3295
3298
3296 elif opts['peer'] == 'raw':
3299 elif opts['peer'] == 'raw':
3297 ui.write(_('using raw connection to peer\n'))
3300 ui.write(_('using raw connection to peer\n'))
3298 peer = None
3301 peer = None
3299 elif opts['peer']:
3302 elif opts['peer']:
3300 raise error.Abort(_('--peer %s not supported with HTTP peers') %
3303 raise error.Abort(_('--peer %s not supported with HTTP peers') %
3301 opts['peer'])
3304 opts['peer'])
3302 else:
3305 else:
3303 peer = httppeer.makepeer(ui, path, opener=opener)
3306 peer = httppeer.makepeer(ui, path, opener=opener)
3304
3307
3305 # We /could/ populate stdin/stdout with sock.makefile()...
3308 # We /could/ populate stdin/stdout with sock.makefile()...
3306 else:
3309 else:
3307 raise error.Abort(_('unsupported connection configuration'))
3310 raise error.Abort(_('unsupported connection configuration'))
3308
3311
3309 batchedcommands = None
3312 batchedcommands = None
3310
3313
3311 # Now perform actions based on the parsed wire language instructions.
3314 # Now perform actions based on the parsed wire language instructions.
3312 for action, lines in blocks:
3315 for action, lines in blocks:
3313 if action in ('raw', 'raw+'):
3316 if action in ('raw', 'raw+'):
3314 if not stdin:
3317 if not stdin:
3315 raise error.Abort(_('cannot call raw/raw+ on this peer'))
3318 raise error.Abort(_('cannot call raw/raw+ on this peer'))
3316
3319
3317 # Concatenate the data together.
3320 # Concatenate the data together.
3318 data = ''.join(l.lstrip() for l in lines)
3321 data = ''.join(l.lstrip() for l in lines)
3319 data = stringutil.unescapestr(data)
3322 data = stringutil.unescapestr(data)
3320 stdin.write(data)
3323 stdin.write(data)
3321
3324
3322 if action == 'raw+':
3325 if action == 'raw+':
3323 stdin.flush()
3326 stdin.flush()
3324 elif action == 'flush':
3327 elif action == 'flush':
3325 if not stdin:
3328 if not stdin:
3326 raise error.Abort(_('cannot call flush on this peer'))
3329 raise error.Abort(_('cannot call flush on this peer'))
3327 stdin.flush()
3330 stdin.flush()
3328 elif action.startswith('command'):
3331 elif action.startswith('command'):
3329 if not peer:
3332 if not peer:
3330 raise error.Abort(_('cannot send commands unless peer instance '
3333 raise error.Abort(_('cannot send commands unless peer instance '
3331 'is available'))
3334 'is available'))
3332
3335
3333 command = action.split(' ', 1)[1]
3336 command = action.split(' ', 1)[1]
3334
3337
3335 args = {}
3338 args = {}
3336 for line in lines:
3339 for line in lines:
3337 # We need to allow empty values.
3340 # We need to allow empty values.
3338 fields = line.lstrip().split(' ', 1)
3341 fields = line.lstrip().split(' ', 1)
3339 if len(fields) == 1:
3342 if len(fields) == 1:
3340 key = fields[0]
3343 key = fields[0]
3341 value = ''
3344 value = ''
3342 else:
3345 else:
3343 key, value = fields
3346 key, value = fields
3344
3347
3345 if value.startswith('eval:'):
3348 if value.startswith('eval:'):
3346 value = stringutil.evalpythonliteral(value[5:])
3349 value = stringutil.evalpythonliteral(value[5:])
3347 else:
3350 else:
3348 value = stringutil.unescapestr(value)
3351 value = stringutil.unescapestr(value)
3349
3352
3350 args[key] = value
3353 args[key] = value
3351
3354
3352 if batchedcommands is not None:
3355 if batchedcommands is not None:
3353 batchedcommands.append((command, args))
3356 batchedcommands.append((command, args))
3354 continue
3357 continue
3355
3358
3356 ui.status(_('sending %s command\n') % command)
3359 ui.status(_('sending %s command\n') % command)
3357
3360
3358 if 'PUSHFILE' in args:
3361 if 'PUSHFILE' in args:
3359 with open(args['PUSHFILE'], r'rb') as fh:
3362 with open(args['PUSHFILE'], r'rb') as fh:
3360 del args['PUSHFILE']
3363 del args['PUSHFILE']
3361 res, output = peer._callpush(command, fh,
3364 res, output = peer._callpush(command, fh,
3362 **pycompat.strkwargs(args))
3365 **pycompat.strkwargs(args))
3363 ui.status(_('result: %s\n') % stringutil.escapestr(res))
3366 ui.status(_('result: %s\n') % stringutil.escapestr(res))
3364 ui.status(_('remote output: %s\n') %
3367 ui.status(_('remote output: %s\n') %
3365 stringutil.escapestr(output))
3368 stringutil.escapestr(output))
3366 else:
3369 else:
3367 with peer.commandexecutor() as e:
3370 with peer.commandexecutor() as e:
3368 res = e.callcommand(command, args).result()
3371 res = e.callcommand(command, args).result()
3369
3372
3370 if isinstance(res, wireprotov2peer.commandresponse):
3373 if isinstance(res, wireprotov2peer.commandresponse):
3371 val = res.objects()
3374 val = res.objects()
3372 ui.status(_('response: %s\n') %
3375 ui.status(_('response: %s\n') %
3373 stringutil.pprint(val, bprefix=True, indent=2))
3376 stringutil.pprint(val, bprefix=True, indent=2))
3374 else:
3377 else:
3375 ui.status(_('response: %s\n') %
3378 ui.status(_('response: %s\n') %
3376 stringutil.pprint(res, bprefix=True, indent=2))
3379 stringutil.pprint(res, bprefix=True, indent=2))
3377
3380
3378 elif action == 'batchbegin':
3381 elif action == 'batchbegin':
3379 if batchedcommands is not None:
3382 if batchedcommands is not None:
3380 raise error.Abort(_('nested batchbegin not allowed'))
3383 raise error.Abort(_('nested batchbegin not allowed'))
3381
3384
3382 batchedcommands = []
3385 batchedcommands = []
3383 elif action == 'batchsubmit':
3386 elif action == 'batchsubmit':
3384 # There is a batching API we could go through. But it would be
3387 # There is a batching API we could go through. But it would be
3385 # difficult to normalize requests into function calls. It is easier
3388 # difficult to normalize requests into function calls. It is easier
3386 # to bypass this layer and normalize to commands + args.
3389 # to bypass this layer and normalize to commands + args.
3387 ui.status(_('sending batch with %d sub-commands\n') %
3390 ui.status(_('sending batch with %d sub-commands\n') %
3388 len(batchedcommands))
3391 len(batchedcommands))
3389 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
3392 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
3390 ui.status(_('response #%d: %s\n') %
3393 ui.status(_('response #%d: %s\n') %
3391 (i, stringutil.escapestr(chunk)))
3394 (i, stringutil.escapestr(chunk)))
3392
3395
3393 batchedcommands = None
3396 batchedcommands = None
3394
3397
3395 elif action.startswith('httprequest '):
3398 elif action.startswith('httprequest '):
3396 if not opener:
3399 if not opener:
3397 raise error.Abort(_('cannot use httprequest without an HTTP '
3400 raise error.Abort(_('cannot use httprequest without an HTTP '
3398 'peer'))
3401 'peer'))
3399
3402
3400 request = action.split(' ', 2)
3403 request = action.split(' ', 2)
3401 if len(request) != 3:
3404 if len(request) != 3:
3402 raise error.Abort(_('invalid httprequest: expected format is '
3405 raise error.Abort(_('invalid httprequest: expected format is '
3403 '"httprequest <method> <path>'))
3406 '"httprequest <method> <path>'))
3404
3407
3405 method, httppath = request[1:]
3408 method, httppath = request[1:]
3406 headers = {}
3409 headers = {}
3407 body = None
3410 body = None
3408 frames = []
3411 frames = []
3409 for line in lines:
3412 for line in lines:
3410 line = line.lstrip()
3413 line = line.lstrip()
3411 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
3414 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
3412 if m:
3415 if m:
3413 # Headers need to use native strings.
3416 # Headers need to use native strings.
3414 key = pycompat.strurl(m.group(1))
3417 key = pycompat.strurl(m.group(1))
3415 value = pycompat.strurl(m.group(2))
3418 value = pycompat.strurl(m.group(2))
3416 headers[key] = value
3419 headers[key] = value
3417 continue
3420 continue
3418
3421
3419 if line.startswith(b'BODYFILE '):
3422 if line.startswith(b'BODYFILE '):
3420 with open(line.split(b' ', 1), 'rb') as fh:
3423 with open(line.split(b' ', 1), 'rb') as fh:
3421 body = fh.read()
3424 body = fh.read()
3422 elif line.startswith(b'frame '):
3425 elif line.startswith(b'frame '):
3423 frame = wireprotoframing.makeframefromhumanstring(
3426 frame = wireprotoframing.makeframefromhumanstring(
3424 line[len(b'frame '):])
3427 line[len(b'frame '):])
3425
3428
3426 frames.append(frame)
3429 frames.append(frame)
3427 else:
3430 else:
3428 raise error.Abort(_('unknown argument to httprequest: %s') %
3431 raise error.Abort(_('unknown argument to httprequest: %s') %
3429 line)
3432 line)
3430
3433
3431 url = path + httppath
3434 url = path + httppath
3432
3435
3433 if frames:
3436 if frames:
3434 body = b''.join(bytes(f) for f in frames)
3437 body = b''.join(bytes(f) for f in frames)
3435
3438
3436 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
3439 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
3437
3440
3438 # urllib.Request insists on using has_data() as a proxy for
3441 # urllib.Request insists on using has_data() as a proxy for
3439 # determining the request method. Override that to use our
3442 # determining the request method. Override that to use our
3440 # explicitly requested method.
3443 # explicitly requested method.
3441 req.get_method = lambda: pycompat.sysstr(method)
3444 req.get_method = lambda: pycompat.sysstr(method)
3442
3445
3443 try:
3446 try:
3444 res = opener.open(req)
3447 res = opener.open(req)
3445 body = res.read()
3448 body = res.read()
3446 except util.urlerr.urlerror as e:
3449 except util.urlerr.urlerror as e:
3447 # read() method must be called, but only exists in Python 2
3450 # read() method must be called, but only exists in Python 2
3448 getattr(e, 'read', lambda: None)()
3451 getattr(e, 'read', lambda: None)()
3449 continue
3452 continue
3450
3453
3451 ct = res.headers.get(r'Content-Type')
3454 ct = res.headers.get(r'Content-Type')
3452 if ct == r'application/mercurial-cbor':
3455 if ct == r'application/mercurial-cbor':
3453 ui.write(_('cbor> %s\n') %
3456 ui.write(_('cbor> %s\n') %
3454 stringutil.pprint(cborutil.decodeall(body),
3457 stringutil.pprint(cborutil.decodeall(body),
3455 bprefix=True,
3458 bprefix=True,
3456 indent=2))
3459 indent=2))
3457
3460
3458 elif action == 'close':
3461 elif action == 'close':
3459 peer.close()
3462 peer.close()
3460 elif action == 'readavailable':
3463 elif action == 'readavailable':
3461 if not stdout or not stderr:
3464 if not stdout or not stderr:
3462 raise error.Abort(_('readavailable not available on this peer'))
3465 raise error.Abort(_('readavailable not available on this peer'))
3463
3466
3464 stdin.close()
3467 stdin.close()
3465 stdout.read()
3468 stdout.read()
3466 stderr.read()
3469 stderr.read()
3467
3470
3468 elif action == 'readline':
3471 elif action == 'readline':
3469 if not stdout:
3472 if not stdout:
3470 raise error.Abort(_('readline not available on this peer'))
3473 raise error.Abort(_('readline not available on this peer'))
3471 stdout.readline()
3474 stdout.readline()
3472 elif action == 'ereadline':
3475 elif action == 'ereadline':
3473 if not stderr:
3476 if not stderr:
3474 raise error.Abort(_('ereadline not available on this peer'))
3477 raise error.Abort(_('ereadline not available on this peer'))
3475 stderr.readline()
3478 stderr.readline()
3476 elif action.startswith('read '):
3479 elif action.startswith('read '):
3477 count = int(action.split(' ', 1)[1])
3480 count = int(action.split(' ', 1)[1])
3478 if not stdout:
3481 if not stdout:
3479 raise error.Abort(_('read not available on this peer'))
3482 raise error.Abort(_('read not available on this peer'))
3480 stdout.read(count)
3483 stdout.read(count)
3481 elif action.startswith('eread '):
3484 elif action.startswith('eread '):
3482 count = int(action.split(' ', 1)[1])
3485 count = int(action.split(' ', 1)[1])
3483 if not stderr:
3486 if not stderr:
3484 raise error.Abort(_('eread not available on this peer'))
3487 raise error.Abort(_('eread not available on this peer'))
3485 stderr.read(count)
3488 stderr.read(count)
3486 else:
3489 else:
3487 raise error.Abort(_('unknown action: %s') % action)
3490 raise error.Abort(_('unknown action: %s') % action)
3488
3491
3489 if batchedcommands is not None:
3492 if batchedcommands is not None:
3490 raise error.Abort(_('unclosed "batchbegin" request'))
3493 raise error.Abort(_('unclosed "batchbegin" request'))
3491
3494
3492 if peer:
3495 if peer:
3493 peer.close()
3496 peer.close()
3494
3497
3495 if proc:
3498 if proc:
3496 proc.kill()
3499 proc.kill()
@@ -1,1056 +1,1060
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 changelog,
14 changelog,
15 error,
15 error,
16 filelog,
16 filelog,
17 hg,
17 hg,
18 localrepo,
18 localrepo,
19 manifest,
19 manifest,
20 pycompat,
20 pycompat,
21 revlog,
21 revlog,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 vfs as vfsmod,
24 vfs as vfsmod,
25 )
25 )
26
26
27 from .utils import (
27 from .utils import (
28 compression,
28 compression,
29 )
29 )
30
30
31 def requiredsourcerequirements(repo):
31 def requiredsourcerequirements(repo):
32 """Obtain requirements required to be present to upgrade a repo.
32 """Obtain requirements required to be present to upgrade a repo.
33
33
34 An upgrade will not be allowed if the repository doesn't have the
34 An upgrade will not be allowed if the repository doesn't have the
35 requirements returned by this function.
35 requirements returned by this function.
36 """
36 """
37 return {
37 return {
38 # Introduced in Mercurial 0.9.2.
38 # Introduced in Mercurial 0.9.2.
39 'revlogv1',
39 'revlogv1',
40 # Introduced in Mercurial 0.9.2.
40 # Introduced in Mercurial 0.9.2.
41 'store',
41 'store',
42 }
42 }
43
43
44 def blocksourcerequirements(repo):
44 def blocksourcerequirements(repo):
45 """Obtain requirements that will prevent an upgrade from occurring.
45 """Obtain requirements that will prevent an upgrade from occurring.
46
46
47 An upgrade cannot be performed if the source repository contains a
47 An upgrade cannot be performed if the source repository contains a
48 requirements in the returned set.
48 requirements in the returned set.
49 """
49 """
50 return {
50 return {
51 # The upgrade code does not yet support these experimental features.
51 # The upgrade code does not yet support these experimental features.
52 # This is an artificial limitation.
52 # This is an artificial limitation.
53 'treemanifest',
53 'treemanifest',
54 # This was a precursor to generaldelta and was never enabled by default.
54 # This was a precursor to generaldelta and was never enabled by default.
55 # It should (hopefully) not exist in the wild.
55 # It should (hopefully) not exist in the wild.
56 'parentdelta',
56 'parentdelta',
57 # Upgrade should operate on the actual store, not the shared link.
57 # Upgrade should operate on the actual store, not the shared link.
58 'shared',
58 'shared',
59 }
59 }
60
60
61 def supportremovedrequirements(repo):
61 def supportremovedrequirements(repo):
62 """Obtain requirements that can be removed during an upgrade.
62 """Obtain requirements that can be removed during an upgrade.
63
63
64 If an upgrade were to create a repository that dropped a requirement,
64 If an upgrade were to create a repository that dropped a requirement,
65 the dropped requirement must appear in the returned set for the upgrade
65 the dropped requirement must appear in the returned set for the upgrade
66 to be allowed.
66 to be allowed.
67 """
67 """
68 supported = {
68 supported = {
69 localrepo.SPARSEREVLOG_REQUIREMENT,
69 localrepo.SPARSEREVLOG_REQUIREMENT,
70 }
70 }
71 for name in compression.compengines:
71 for name in compression.compengines:
72 engine = compression.compengines[name]
72 engine = compression.compengines[name]
73 if engine.available() and engine.revlogheader():
73 if engine.available() and engine.revlogheader():
74 supported.add(b'exp-compression-%s' % name)
74 supported.add(b'exp-compression-%s' % name)
75 if engine.name() == 'zstd':
75 if engine.name() == 'zstd':
76 supported.add(b'revlog-compression-zstd')
76 supported.add(b'revlog-compression-zstd')
77 return supported
77 return supported
78
78
79 def supporteddestrequirements(repo):
79 def supporteddestrequirements(repo):
80 """Obtain requirements that upgrade supports in the destination.
80 """Obtain requirements that upgrade supports in the destination.
81
81
82 If the result of the upgrade would create requirements not in this set,
82 If the result of the upgrade would create requirements not in this set,
83 the upgrade is disallowed.
83 the upgrade is disallowed.
84
84
85 Extensions should monkeypatch this to add their custom requirements.
85 Extensions should monkeypatch this to add their custom requirements.
86 """
86 """
87 supported = {
87 supported = {
88 'dotencode',
88 'dotencode',
89 'fncache',
89 'fncache',
90 'generaldelta',
90 'generaldelta',
91 'revlogv1',
91 'revlogv1',
92 'store',
92 'store',
93 localrepo.SPARSEREVLOG_REQUIREMENT,
93 localrepo.SPARSEREVLOG_REQUIREMENT,
94 }
94 }
95 for name in compression.compengines:
95 for name in compression.compengines:
96 engine = compression.compengines[name]
96 engine = compression.compengines[name]
97 if engine.available() and engine.revlogheader():
97 if engine.available() and engine.revlogheader():
98 supported.add(b'exp-compression-%s' % name)
98 supported.add(b'exp-compression-%s' % name)
99 if engine.name() == 'zstd':
99 if engine.name() == 'zstd':
100 supported.add(b'revlog-compression-zstd')
100 supported.add(b'revlog-compression-zstd')
101 return supported
101 return supported
102
102
103 def allowednewrequirements(repo):
103 def allowednewrequirements(repo):
104 """Obtain requirements that can be added to a repository during upgrade.
104 """Obtain requirements that can be added to a repository during upgrade.
105
105
106 This is used to disallow proposed requirements from being added when
106 This is used to disallow proposed requirements from being added when
107 they weren't present before.
107 they weren't present before.
108
108
109 We use a list of allowed requirement additions instead of a list of known
109 We use a list of allowed requirement additions instead of a list of known
110 bad additions because the whitelist approach is safer and will prevent
110 bad additions because the whitelist approach is safer and will prevent
111 future, unknown requirements from accidentally being added.
111 future, unknown requirements from accidentally being added.
112 """
112 """
113 supported = {
113 supported = {
114 'dotencode',
114 'dotencode',
115 'fncache',
115 'fncache',
116 'generaldelta',
116 'generaldelta',
117 localrepo.SPARSEREVLOG_REQUIREMENT,
117 localrepo.SPARSEREVLOG_REQUIREMENT,
118 }
118 }
119 for name in compression.compengines:
119 for name in compression.compengines:
120 engine = compression.compengines[name]
120 engine = compression.compengines[name]
121 if engine.available() and engine.revlogheader():
121 if engine.available() and engine.revlogheader():
122 supported.add(b'exp-compression-%s' % name)
122 supported.add(b'exp-compression-%s' % name)
123 if engine.name() == 'zstd':
123 if engine.name() == 'zstd':
124 supported.add(b'revlog-compression-zstd')
124 supported.add(b'revlog-compression-zstd')
125 return supported
125 return supported
126
126
127 def preservedrequirements(repo):
127 def preservedrequirements(repo):
128 return set()
128 return set()
129
129
130 deficiency = 'deficiency'
130 deficiency = 'deficiency'
131 optimisation = 'optimization'
131 optimisation = 'optimization'
132
132
133 class improvement(object):
133 class improvement(object):
134 """Represents an improvement that can be made as part of an upgrade.
134 """Represents an improvement that can be made as part of an upgrade.
135
135
136 The following attributes are defined on each instance:
136 The following attributes are defined on each instance:
137
137
138 name
138 name
139 Machine-readable string uniquely identifying this improvement. It
139 Machine-readable string uniquely identifying this improvement. It
140 will be mapped to an action later in the upgrade process.
140 will be mapped to an action later in the upgrade process.
141
141
142 type
142 type
143 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
143 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
144 problem. An optimization is an action (sometimes optional) that
144 problem. An optimization is an action (sometimes optional) that
145 can be taken to further improve the state of the repository.
145 can be taken to further improve the state of the repository.
146
146
147 description
147 description
148 Message intended for humans explaining the improvement in more detail,
148 Message intended for humans explaining the improvement in more detail,
149 including the implications of it. For ``deficiency`` types, should be
149 including the implications of it. For ``deficiency`` types, should be
150 worded in the present tense. For ``optimisation`` types, should be
150 worded in the present tense. For ``optimisation`` types, should be
151 worded in the future tense.
151 worded in the future tense.
152
152
153 upgrademessage
153 upgrademessage
154 Message intended for humans explaining what an upgrade addressing this
154 Message intended for humans explaining what an upgrade addressing this
155 issue will do. Should be worded in the future tense.
155 issue will do. Should be worded in the future tense.
156 """
156 """
157 def __init__(self, name, type, description, upgrademessage):
157 def __init__(self, name, type, description, upgrademessage):
158 self.name = name
158 self.name = name
159 self.type = type
159 self.type = type
160 self.description = description
160 self.description = description
161 self.upgrademessage = upgrademessage
161 self.upgrademessage = upgrademessage
162
162
163 def __eq__(self, other):
163 def __eq__(self, other):
164 if not isinstance(other, improvement):
164 if not isinstance(other, improvement):
165 # This is what python tell use to do
165 # This is what python tell use to do
166 return NotImplemented
166 return NotImplemented
167 return self.name == other.name
167 return self.name == other.name
168
168
169 def __ne__(self, other):
169 def __ne__(self, other):
170 return not (self == other)
170 return not (self == other)
171
171
172 def __hash__(self):
172 def __hash__(self):
173 return hash(self.name)
173 return hash(self.name)
174
174
175 allformatvariant = []
175 allformatvariant = []
176
176
177 def registerformatvariant(cls):
177 def registerformatvariant(cls):
178 allformatvariant.append(cls)
178 allformatvariant.append(cls)
179 return cls
179 return cls
180
180
181 class formatvariant(improvement):
181 class formatvariant(improvement):
182 """an improvement subclass dedicated to repository format"""
182 """an improvement subclass dedicated to repository format"""
183 type = deficiency
183 type = deficiency
184 ### The following attributes should be defined for each class:
184 ### The following attributes should be defined for each class:
185
185
186 # machine-readable string uniquely identifying this improvement. it will be
186 # machine-readable string uniquely identifying this improvement. it will be
187 # mapped to an action later in the upgrade process.
187 # mapped to an action later in the upgrade process.
188 name = None
188 name = None
189
189
190 # message intended for humans explaining the improvement in more detail,
190 # message intended for humans explaining the improvement in more detail,
191 # including the implications of it ``deficiency`` types, should be worded
191 # including the implications of it ``deficiency`` types, should be worded
192 # in the present tense.
192 # in the present tense.
193 description = None
193 description = None
194
194
195 # message intended for humans explaining what an upgrade addressing this
195 # message intended for humans explaining what an upgrade addressing this
196 # issue will do. should be worded in the future tense.
196 # issue will do. should be worded in the future tense.
197 upgrademessage = None
197 upgrademessage = None
198
198
199 # value of current Mercurial default for new repository
199 # value of current Mercurial default for new repository
200 default = None
200 default = None
201
201
202 def __init__(self):
202 def __init__(self):
203 raise NotImplementedError()
203 raise NotImplementedError()
204
204
205 @staticmethod
205 @staticmethod
206 def fromrepo(repo):
206 def fromrepo(repo):
207 """current value of the variant in the repository"""
207 """current value of the variant in the repository"""
208 raise NotImplementedError()
208 raise NotImplementedError()
209
209
210 @staticmethod
210 @staticmethod
211 def fromconfig(repo):
211 def fromconfig(repo):
212 """current value of the variant in the configuration"""
212 """current value of the variant in the configuration"""
213 raise NotImplementedError()
213 raise NotImplementedError()
214
214
215 class requirementformatvariant(formatvariant):
215 class requirementformatvariant(formatvariant):
216 """formatvariant based on a 'requirement' name.
216 """formatvariant based on a 'requirement' name.
217
217
218 Many format variant are controlled by a 'requirement'. We define a small
218 Many format variant are controlled by a 'requirement'. We define a small
219 subclass to factor the code.
219 subclass to factor the code.
220 """
220 """
221
221
222 # the requirement that control this format variant
222 # the requirement that control this format variant
223 _requirement = None
223 _requirement = None
224
224
225 @staticmethod
225 @staticmethod
226 def _newreporequirements(ui):
226 def _newreporequirements(ui):
227 return localrepo.newreporequirements(
227 return localrepo.newreporequirements(
228 ui, localrepo.defaultcreateopts(ui))
228 ui, localrepo.defaultcreateopts(ui))
229
229
230 @classmethod
230 @classmethod
231 def fromrepo(cls, repo):
231 def fromrepo(cls, repo):
232 assert cls._requirement is not None
232 assert cls._requirement is not None
233 return cls._requirement in repo.requirements
233 return cls._requirement in repo.requirements
234
234
235 @classmethod
235 @classmethod
236 def fromconfig(cls, repo):
236 def fromconfig(cls, repo):
237 assert cls._requirement is not None
237 assert cls._requirement is not None
238 return cls._requirement in cls._newreporequirements(repo.ui)
238 return cls._requirement in cls._newreporequirements(repo.ui)
239
239
240 @registerformatvariant
240 @registerformatvariant
241 class fncache(requirementformatvariant):
241 class fncache(requirementformatvariant):
242 name = 'fncache'
242 name = 'fncache'
243
243
244 _requirement = 'fncache'
244 _requirement = 'fncache'
245
245
246 default = True
246 default = True
247
247
248 description = _('long and reserved filenames may not work correctly; '
248 description = _('long and reserved filenames may not work correctly; '
249 'repository performance is sub-optimal')
249 'repository performance is sub-optimal')
250
250
251 upgrademessage = _('repository will be more resilient to storing '
251 upgrademessage = _('repository will be more resilient to storing '
252 'certain paths and performance of certain '
252 'certain paths and performance of certain '
253 'operations should be improved')
253 'operations should be improved')
254
254
255 @registerformatvariant
255 @registerformatvariant
256 class dotencode(requirementformatvariant):
256 class dotencode(requirementformatvariant):
257 name = 'dotencode'
257 name = 'dotencode'
258
258
259 _requirement = 'dotencode'
259 _requirement = 'dotencode'
260
260
261 default = True
261 default = True
262
262
263 description = _('storage of filenames beginning with a period or '
263 description = _('storage of filenames beginning with a period or '
264 'space may not work correctly')
264 'space may not work correctly')
265
265
266 upgrademessage = _('repository will be better able to store files '
266 upgrademessage = _('repository will be better able to store files '
267 'beginning with a space or period')
267 'beginning with a space or period')
268
268
269 @registerformatvariant
269 @registerformatvariant
270 class generaldelta(requirementformatvariant):
270 class generaldelta(requirementformatvariant):
271 name = 'generaldelta'
271 name = 'generaldelta'
272
272
273 _requirement = 'generaldelta'
273 _requirement = 'generaldelta'
274
274
275 default = True
275 default = True
276
276
277 description = _('deltas within internal storage are unable to '
277 description = _('deltas within internal storage are unable to '
278 'choose optimal revisions; repository is larger and '
278 'choose optimal revisions; repository is larger and '
279 'slower than it could be; interaction with other '
279 'slower than it could be; interaction with other '
280 'repositories may require extra network and CPU '
280 'repositories may require extra network and CPU '
281 'resources, making "hg push" and "hg pull" slower')
281 'resources, making "hg push" and "hg pull" slower')
282
282
283 upgrademessage = _('repository storage will be able to create '
283 upgrademessage = _('repository storage will be able to create '
284 'optimal deltas; new repository data will be '
284 'optimal deltas; new repository data will be '
285 'smaller and read times should decrease; '
285 'smaller and read times should decrease; '
286 'interacting with other repositories using this '
286 'interacting with other repositories using this '
287 'storage model should require less network and '
287 'storage model should require less network and '
288 'CPU resources, making "hg push" and "hg pull" '
288 'CPU resources, making "hg push" and "hg pull" '
289 'faster')
289 'faster')
290
290
291 @registerformatvariant
291 @registerformatvariant
292 class sparserevlog(requirementformatvariant):
292 class sparserevlog(requirementformatvariant):
293 name = 'sparserevlog'
293 name = 'sparserevlog'
294
294
295 _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
295 _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
296
296
297 default = True
297 default = True
298
298
299 description = _('in order to limit disk reading and memory usage on older '
299 description = _('in order to limit disk reading and memory usage on older '
300 'version, the span of a delta chain from its root to its '
300 'version, the span of a delta chain from its root to its '
301 'end is limited, whatever the relevant data in this span. '
301 'end is limited, whatever the relevant data in this span. '
302 'This can severly limit Mercurial ability to build good '
302 'This can severly limit Mercurial ability to build good '
303 'chain of delta resulting is much more storage space being '
303 'chain of delta resulting is much more storage space being '
304 'taken and limit reusability of on disk delta during '
304 'taken and limit reusability of on disk delta during '
305 'exchange.'
305 'exchange.'
306 )
306 )
307
307
308 upgrademessage = _('Revlog supports delta chain with more unused data '
308 upgrademessage = _('Revlog supports delta chain with more unused data '
309 'between payload. These gaps will be skipped at read '
309 'between payload. These gaps will be skipped at read '
310 'time. This allows for better delta chains, making a '
310 'time. This allows for better delta chains, making a '
311 'better compression and faster exchange with server.')
311 'better compression and faster exchange with server.')
312
312
313 @registerformatvariant
313 @registerformatvariant
314 class removecldeltachain(formatvariant):
314 class removecldeltachain(formatvariant):
315 name = 'plain-cl-delta'
315 name = 'plain-cl-delta'
316
316
317 default = True
317 default = True
318
318
319 description = _('changelog storage is using deltas instead of '
319 description = _('changelog storage is using deltas instead of '
320 'raw entries; changelog reading and any '
320 'raw entries; changelog reading and any '
321 'operation relying on changelog data are slower '
321 'operation relying on changelog data are slower '
322 'than they could be')
322 'than they could be')
323
323
324 upgrademessage = _('changelog storage will be reformated to '
324 upgrademessage = _('changelog storage will be reformated to '
325 'store raw entries; changelog reading will be '
325 'store raw entries; changelog reading will be '
326 'faster; changelog size may be reduced')
326 'faster; changelog size may be reduced')
327
327
328 @staticmethod
328 @staticmethod
329 def fromrepo(repo):
329 def fromrepo(repo):
330 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
330 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
331 # changelogs with deltas.
331 # changelogs with deltas.
332 cl = repo.changelog
332 cl = repo.changelog
333 chainbase = cl.chainbase
333 chainbase = cl.chainbase
334 return all(rev == chainbase(rev) for rev in cl)
334 return all(rev == chainbase(rev) for rev in cl)
335
335
336 @staticmethod
336 @staticmethod
337 def fromconfig(repo):
337 def fromconfig(repo):
338 return True
338 return True
339
339
@registerformatvariant
class compressionengine(formatvariant):
    """Format variant tracking which compression engine revlogs use."""

    name = 'compression'
    default = 'zlib'

    # User-facing text; typos from the previous wording ("Compresion",
    # "Some engine are faster than other") are fixed here.
    description = _('Compression algorithm used to compress data. '
                    'Some engines are faster than others')

    upgrademessage = _('revlog content will be recompressed with the new '
                       'algorithm.')

    @classmethod
    def fromrepo(cls, repo):
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        # mentioned in the requirements file.
        compression = 'zlib'
        for req in repo.requirements:
            prefix = req.startswith
            if prefix('revlog-compression-') or prefix('exp-compression-'):
                compression = req.split('-', 2)[2]
        return compression

    @classmethod
    def fromconfig(cls, repo):
        return repo.ui.config('format', 'revlog-compression')
367
367
@registerformatvariant
class compressionlevel(formatvariant):
    """Format variant tracking the configured compression level.

    ``fromrepo`` and ``fromconfig`` previously duplicated the exact same
    engine-to-level lookup; it is factored into ``_levelfor`` so the two
    code paths cannot drift apart.
    """

    name = 'compression-level'
    default = 'default'

    description = _('compression level')

    upgrademessage = _('revlog content will be recompressed')

    @classmethod
    def _levelfor(cls, repo, comp):
        """Return the configured level for engine ``comp``.

        Only zlib and zstd expose a configurable level; any other engine
        (or an unset level) yields the string 'default'.
        """
        level = None
        if comp == 'zlib':
            level = repo.ui.configint('storage', 'revlog.zlib.level')
        elif comp == 'zstd':
            level = repo.ui.configint('storage', 'revlog.zstd.level')
        if level is None:
            return 'default'
        # NOTE(review): bytes(int) yields a zero-filled buffer under
        # Python 3 str semantics; presumably this source is compiled with
        # bytes literals (Python 2 behavior) -- confirm before porting.
        return bytes(level)

    @classmethod
    def fromrepo(cls, repo):
        return cls._levelfor(repo, compressionengine.fromrepo(repo))

    @classmethod
    def fromconfig(cls, repo):
        return cls._levelfor(repo, compressionengine.fromconfig(repo))
400
400
def finddeficiencies(repo):
    """returns a list of deficiencies that the repo suffer from"""
    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.
    return [fv for fv in allformatvariant if not fv.fromrepo(repo)]
414
414
# Search without '-' to support the older option form on newer clients.
#
# We don't enforce backward compatibility for debug commands, so this
# mapping might eventually be dropped. However, having to use two
# different forms in scripts when comparing results is annoying enough
# to keep backward compatibility around for a while.
legacy_opts_map = {
    'redeltaparent': 're-delta-parent',
    'redeltamultibase': 're-delta-multibase',
    'redeltaall': 're-delta-all',
    'redeltafulladd': 're-delta-fulladd',
}
427
427
def findoptimizations(repo):
    """Determine optimisation that could be used during upgrade"""
    # Every known optimization is returned unconditionally; logic later on
    # figures out which ones actually get applied.
    return [
        improvement(
            name='re-delta-parent',
            type=optimisation,
            description=_('deltas within internal storage will be recalculated to '
                          'choose an optimal base revision where this was not '
                          'already done; the size of the repository may shrink and '
                          'various operations may become faster; the first time '
                          'this optimization is performed could slow down upgrade '
                          'execution considerably; subsequent invocations should '
                          'not run noticeably slower'),
            upgrademessage=_('deltas within internal storage will choose a new '
                             'base revision if needed')),
        improvement(
            name='re-delta-multibase',
            type=optimisation,
            description=_('deltas within internal storage will be recalculated '
                          'against multiple base revision and the smallest '
                          'difference will be used; the size of the repository may '
                          'shrink significantly when there are many merges; this '
                          'optimization will slow down execution in proportion to '
                          'the number of merges in the repository and the amount '
                          'of files in the repository; this slow down should not '
                          'be significant unless there are tens of thousands of '
                          'files and thousands of merges'),
            upgrademessage=_('deltas within internal storage will choose an '
                             'optimal delta by computing deltas against multiple '
                             'parents; may slow down execution time '
                             'significantly')),
        improvement(
            name='re-delta-all',
            type=optimisation,
            description=_('deltas within internal storage will always be '
                          'recalculated without reusing prior deltas; this will '
                          'likely make execution run several times slower; this '
                          'optimization is typically not needed'),
            upgrademessage=_('deltas within internal storage will be fully '
                             'recomputed; this will likely drastically slow down '
                             'execution time')),
        improvement(
            name='re-delta-fulladd',
            type=optimisation,
            description=_('every revision will be re-added as if it was new '
                          'content. It will go through the full storage '
                          'mechanism giving extensions a chance to process it '
                          '(eg. lfs). This is similar to "re-delta-all" but even '
                          'slower since more logic is involved.'),
            upgrademessage=_('each revision will be added as new content to the '
                             'internal storage; this will likely drastically slow '
                             'down execution time, but some extensions might need '
                             'it')),
    ]
489
489
def determineactions(repo, deficiencies, sourcereqs, destreqs):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``finddeficiencies`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    knownreqs = supporteddestrequirements(repo)

    # A deficiency whose name is a known requirement that the destination
    # does not carry is pruned: the upgrade will not introduce it.
    newactions = [d for d in deficiencies
                  if d.name not in knownreqs or d.name in destreqs]

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
521
521
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith('00manifest.i'):
        tree = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=tree)
    # Anything else is a filelog stored as "data/<tracked path>.i": drop
    # the "data/" prefix and the ".i" suffix to recover the tracked path.
    return filelog.filelog(repo.svfs, path[5:-2])
535
535
def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    # Unwrap wrapper objects down to the underlying revlog, on both sides.
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    oldvfs = oldrl.opener
    newvfs = newrl.opener

    oldindex = oldvfs.join(oldrl.indexfile)
    olddata = oldvfs.join(oldrl.datafile)
    newindex = newvfs.join(newrl.indexfile)
    newdata = newvfs.join(newrl.datafile)

    newvfs.makedirs(newvfs.dirname(newrl.indexfile))

    util.copyfile(oldindex, newindex)
    # The data file may be absent (presumably for inline revlogs), hence
    # the existence check before copying.
    if oldvfs.exists(olddata):
        util.copyfile(olddata, newdata)

    # fncache only tracks filelogs; changelog and manifest stay out of it.
    if not unencodedname.endswith(('00changelog.i', '00manifest.i')):
        destrepo.svfs.fncache.add(unencodedname)
564
564
# Sentinels naming the three categories of revlog an upgrade can target.
UPGRADE_CHANGELOG = object()
UPGRADE_MANIFEST = object()
UPGRADE_FILELOG = object()

UPGRADE_ALL_REVLOGS = frozenset([UPGRADE_CHANGELOG,
                                 UPGRADE_MANIFEST,
                                 UPGRADE_FILELOG])

def matchrevlog(revlogfilter, entry):
    """check if a revlog is selected for cloning

    The store entry is checked against the passed filter"""
    # Classify the entry by its filename, then test membership once.
    if entry.endswith('00changelog.i'):
        category = UPGRADE_CHANGELOG
    elif entry.endswith('00manifest.i'):
        category = UPGRADE_MANIFEST
    else:
        category = UPGRADE_FILELOG
    return category in revlogfilter
582
582
def _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents,
                  revlogs=UPGRADE_ALL_REVLOGS):
    """Copy revlogs between 2 repos.

    A first pass gathers per-category (changelog/manifest/filelog) revision
    counts and sizes so a meaningful progress bar can be shown; a second
    pass performs the actual migration, either cloning a revlog (when it is
    selected by ``revlogs``) or blindly copying its files.

    Fix over the previous version: the ``ProgrammingError`` for an unknown
    revlog type was constructed but never raised.
    """
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith('.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(exclusivefiles=True, revisionscount=True,
                              trackedsize=True, storedsize=True)

        revcount += info['revisionscount'] or 0
        datasize = info['storedsize'] or 0
        rawsize = info['trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            # Previously this exception was instantiated but not raised,
            # silently ignoring unknown revlog types.
            raise error.ProgrammingError('unknown revlog type')

    if not revcount:
        return

    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
               '%d in changelog)\n') %
             (revcount, frevcount, mrevcount, crevcount))
    ui.write(_('migrating %s in store; %s tracked data\n') % (
             util.bytecount(srcsize), util.bytecount(srcrawsize)))

    # Used to keep track of progress.
    progress = None
    def oncopiedrevision(rl, rev, node):
        progress.increment()

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith('.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)

        # store.walk yields filelogs first, then manifests, then the
        # changelog; each transition closes the previous progress bar,
        # reports the previous category's totals and opens a new bar.
        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
            ui.write(_('finished migrating %d manifest revisions across %d '
                       'manifests; change in size: %s\n') %
                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))

            ui.write(_('migrating changelog containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (crevcount, util.bytecount(csrcsize),
                      util.bytecount(crawsize)))
            seen.add('c')
            progress = srcrepo.ui.makeprogress(_('changelog revisions'),
                                               total=crevcount)
        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
            ui.write(_('finished migrating %d filelog revisions across %d '
                       'filelogs; change in size: %s\n') %
                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))

            ui.write(_('migrating %d manifests containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (mcount, mrevcount, util.bytecount(msrcsize),
                      util.bytecount(mrawsize)))
            seen.add('m')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(_('manifest revisions'),
                                               total=mrevcount)
        elif 'f' not in seen:
            ui.write(_('migrating %d filelogs containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (fcount, frevcount, util.bytecount(fsrcsize),
                      util.bytecount(frawsize)))
            seen.add('f')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(_('file revisions'),
                                               total=frevcount)

        if matchrevlog(revlogs, unencoded):
            # Selected for upgrade: clone revision by revision, letting the
            # delta computation strategy apply.
            ui.note(_('cloning %d revisions from %s\n')
                    % (len(oldrl), unencoded))
            newrl = _revlogfrompath(dstrepo, unencoded)
            oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
                        deltareuse=deltareuse,
                        forcedeltabothparents=forcedeltabothparents)
        else:
            # Not selected: copy the files verbatim, then reopen the new
            # revlog so its size can be measured below.
            msg = _('blindly copying %s containing %i revisions\n')
            ui.note(msg % (unencoded, len(oldrl)))
            _copyrevlog(tr, dstrepo, oldrl, unencoded)

            newrl = _revlogfrompath(dstrepo, unencoded)

        info = newrl.storageinfo(storedsize=True)
        datasize = info['storedsize'] or 0

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    progress.complete()

    ui.write(_('finished migrating %d changelog revisions; change in size: '
               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))

    ui.write(_('finished migrating %d total revisions; total change in store '
               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
736
736
737 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
737 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
738 """Determine whether to copy a store file during upgrade.
738 """Determine whether to copy a store file during upgrade.
739
739
740 This function is called when migrating store files from ``srcrepo`` to
740 This function is called when migrating store files from ``srcrepo`` to
741 ``dstrepo`` as part of upgrading a repository.
741 ``dstrepo`` as part of upgrading a repository.
742
742
743 Args:
743 Args:
744 srcrepo: repo we are copying from
744 srcrepo: repo we are copying from
745 dstrepo: repo we are copying to
745 dstrepo: repo we are copying to
746 requirements: set of requirements for ``dstrepo``
746 requirements: set of requirements for ``dstrepo``
747 path: store file being examined
747 path: store file being examined
748 mode: the ``ST_MODE`` file type of ``path``
748 mode: the ``ST_MODE`` file type of ``path``
749 st: ``stat`` data structure for ``path``
749 st: ``stat`` data structure for ``path``
750
750
751 Function should return ``True`` if the file is to be copied.
751 Function should return ``True`` if the file is to be copied.
752 """
752 """
753 # Skip revlogs.
753 # Skip revlogs.
754 if path.endswith(('.i', '.d')):
754 if path.endswith(('.i', '.d')):
755 return False
755 return False
756 # Skip transaction related files.
756 # Skip transaction related files.
757 if path.startswith('undo'):
757 if path.startswith('undo'):
758 return False
758 return False
759 # Only copy regular files.
759 # Only copy regular files.
760 if mode != stat.S_IFREG:
760 if mode != stat.S_IFREG:
761 return False
761 return False
762 # Skip other skipped files.
762 # Skip other skipped files.
763 if path in ('lock', 'fncache'):
763 if path in ('lock', 'fncache'):
764 return False
764 return False
765
765
766 return True
766 return True
767
767
def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.

    The default implementation intentionally does nothing.
    """
774
774
775 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions,
775 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions,
776 revlogs=UPGRADE_ALL_REVLOGS):
776 revlogs=UPGRADE_ALL_REVLOGS):
777 """Do the low-level work of upgrading a repository.
777 """Do the low-level work of upgrading a repository.
778
778
779 The upgrade is effectively performed as a copy between a source
779 The upgrade is effectively performed as a copy between a source
780 repository and a temporary destination repository.
780 repository and a temporary destination repository.
781
781
782 The source repository is unmodified for as long as possible so the
782 The source repository is unmodified for as long as possible so the
783 upgrade can abort at any time without causing loss of service for
783 upgrade can abort at any time without causing loss of service for
784 readers and without corrupting the source repository.
784 readers and without corrupting the source repository.
785 """
785 """
786 assert srcrepo.currentwlock()
786 assert srcrepo.currentwlock()
787 assert dstrepo.currentwlock()
787 assert dstrepo.currentwlock()
788
788
789 ui.write(_('(it is safe to interrupt this process any time before '
789 ui.write(_('(it is safe to interrupt this process any time before '
790 'data migration completes)\n'))
790 'data migration completes)\n'))
791
791
792 if 're-delta-all' in actions:
792 if 're-delta-all' in actions:
793 deltareuse = revlog.revlog.DELTAREUSENEVER
793 deltareuse = revlog.revlog.DELTAREUSENEVER
794 elif 're-delta-parent' in actions:
794 elif 're-delta-parent' in actions:
795 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
795 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
796 elif 're-delta-multibase' in actions:
796 elif 're-delta-multibase' in actions:
797 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
797 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
798 elif 're-delta-fulladd' in actions:
798 elif 're-delta-fulladd' in actions:
799 deltareuse = revlog.revlog.DELTAREUSEFULLADD
799 deltareuse = revlog.revlog.DELTAREUSEFULLADD
800 else:
800 else:
801 deltareuse = revlog.revlog.DELTAREUSEALWAYS
801 deltareuse = revlog.revlog.DELTAREUSEALWAYS
802
802
803 with dstrepo.transaction('upgrade') as tr:
803 with dstrepo.transaction('upgrade') as tr:
804 _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
804 _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
805 're-delta-multibase' in actions, revlogs=revlogs)
805 're-delta-multibase' in actions, revlogs=revlogs)
806
806
807 # Now copy other files in the store directory.
807 # Now copy other files in the store directory.
808 # The sorted() makes execution deterministic.
808 # The sorted() makes execution deterministic.
809 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
809 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
810 if not _filterstorefile(srcrepo, dstrepo, requirements,
810 if not _filterstorefile(srcrepo, dstrepo, requirements,
811 p, kind, st):
811 p, kind, st):
812 continue
812 continue
813
813
814 srcrepo.ui.write(_('copying %s\n') % p)
814 srcrepo.ui.write(_('copying %s\n') % p)
815 src = srcrepo.store.rawvfs.join(p)
815 src = srcrepo.store.rawvfs.join(p)
816 dst = dstrepo.store.rawvfs.join(p)
816 dst = dstrepo.store.rawvfs.join(p)
817 util.copyfile(src, dst, copystat=True)
817 util.copyfile(src, dst, copystat=True)
818
818
819 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
819 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
820
820
821 ui.write(_('data fully migrated to temporary repository\n'))
821 ui.write(_('data fully migrated to temporary repository\n'))
822
822
823 backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
823 backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
824 backupvfs = vfsmod.vfs(backuppath)
824 backupvfs = vfsmod.vfs(backuppath)
825
825
826 # Make a backup of requires file first, as it is the first to be modified.
826 # Make a backup of requires file first, as it is the first to be modified.
827 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
827 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
828
828
829 # We install an arbitrary requirement that clients must not support
829 # We install an arbitrary requirement that clients must not support
830 # as a mechanism to lock out new clients during the data swap. This is
830 # as a mechanism to lock out new clients during the data swap. This is
831 # better than allowing a client to continue while the repository is in
831 # better than allowing a client to continue while the repository is in
832 # an inconsistent state.
832 # an inconsistent state.
833 ui.write(_('marking source repository as being upgraded; clients will be '
833 ui.write(_('marking source repository as being upgraded; clients will be '
834 'unable to read from repository\n'))
834 'unable to read from repository\n'))
835 scmutil.writerequires(srcrepo.vfs,
835 scmutil.writerequires(srcrepo.vfs,
836 srcrepo.requirements | {'upgradeinprogress'})
836 srcrepo.requirements | {'upgradeinprogress'})
837
837
838 ui.write(_('starting in-place swap of repository data\n'))
838 ui.write(_('starting in-place swap of repository data\n'))
839 ui.write(_('replaced files will be backed up at %s\n') %
839 ui.write(_('replaced files will be backed up at %s\n') %
840 backuppath)
840 backuppath)
841
841
842 # Now swap in the new store directory. Doing it as a rename should make
842 # Now swap in the new store directory. Doing it as a rename should make
843 # the operation nearly instantaneous and atomic (at least in well-behaved
843 # the operation nearly instantaneous and atomic (at least in well-behaved
844 # environments).
844 # environments).
845 ui.write(_('replacing store...\n'))
845 ui.write(_('replacing store...\n'))
846 tstart = util.timer()
846 tstart = util.timer()
847 util.rename(srcrepo.spath, backupvfs.join('store'))
847 util.rename(srcrepo.spath, backupvfs.join('store'))
848 util.rename(dstrepo.spath, srcrepo.spath)
848 util.rename(dstrepo.spath, srcrepo.spath)
849 elapsed = util.timer() - tstart
849 elapsed = util.timer() - tstart
850 ui.write(_('store replacement complete; repository was inconsistent for '
850 ui.write(_('store replacement complete; repository was inconsistent for '
851 '%0.1fs\n') % elapsed)
851 '%0.1fs\n') % elapsed)
852
852
853 # We first write the requirements file. Any new requirements will lock
853 # We first write the requirements file. Any new requirements will lock
854 # out legacy clients.
854 # out legacy clients.
855 ui.write(_('finalizing requirements file and making repository readable '
855 ui.write(_('finalizing requirements file and making repository readable '
856 'again\n'))
856 'again\n'))
857 scmutil.writerequires(srcrepo.vfs, requirements)
857 scmutil.writerequires(srcrepo.vfs, requirements)
858
858
859 # The lock file from the old store won't be removed because nothing has a
859 # The lock file from the old store won't be removed because nothing has a
860 # reference to its new location. So clean it up manually. Alternatively, we
860 # reference to its new location. So clean it up manually. Alternatively, we
861 # could update srcrepo.svfs and other variables to point to the new
861 # could update srcrepo.svfs and other variables to point to the new
862 # location. This is simpler.
862 # location. This is simpler.
863 backupvfs.unlink('store/lock')
863 backupvfs.unlink('store/lock')
864
864
865 return backuppath
865 return backuppath
866
866
867 def upgraderepo(ui, repo, run=False, optimize=None, backup=True,
867 def upgraderepo(ui, repo, run=False, optimize=None, backup=True,
868 manifest=None):
868 manifest=None, changelog=None):
869 """Upgrade a repository in place."""
869 """Upgrade a repository in place."""
870 if optimize is None:
870 if optimize is None:
871 optimize = []
871 optimize = []
872 optimize = set(legacy_opts_map.get(o, o) for o in optimize)
872 optimize = set(legacy_opts_map.get(o, o) for o in optimize)
873 repo = repo.unfiltered()
873 repo = repo.unfiltered()
874
874
875 revlogs = set(UPGRADE_ALL_REVLOGS)
875 revlogs = set(UPGRADE_ALL_REVLOGS)
876 specentries = (('m', manifest),)
876 specentries = (('c', changelog), ('m', manifest))
877 specified = [(y, x) for (y, x) in specentries if x is not None]
877 specified = [(y, x) for (y, x) in specentries if x is not None]
878 if specified:
878 if specified:
879 # we have some limitation on revlogs to be recloned
879 # we have some limitation on revlogs to be recloned
880 if any(x for y, x in specified):
880 if any(x for y, x in specified):
881 revlogs = set()
881 revlogs = set()
882 for r, enabled in specified:
882 for r, enabled in specified:
883 if enabled:
883 if enabled:
884 if r == 'm':
884 if r == 'c':
885 revlogs.add(UPGRADE_CHANGELOG)
886 elif r == 'm':
885 revlogs.add(UPGRADE_MANIFEST)
887 revlogs.add(UPGRADE_MANIFEST)
886 else:
888 else:
887 # none are enabled
889 # none are enabled
888 for r, __ in specified:
890 for r, __ in specified:
889 if r == 'm':
891 if r == 'c':
892 revlogs.discard(UPGRADE_CHANGELOG)
893 elif r == 'm':
890 revlogs.discard(UPGRADE_MANIFEST)
894 revlogs.discard(UPGRADE_MANIFEST)
891
895
892 # Ensure the repository can be upgraded.
896 # Ensure the repository can be upgraded.
893 missingreqs = requiredsourcerequirements(repo) - repo.requirements
897 missingreqs = requiredsourcerequirements(repo) - repo.requirements
894 if missingreqs:
898 if missingreqs:
895 raise error.Abort(_('cannot upgrade repository; requirement '
899 raise error.Abort(_('cannot upgrade repository; requirement '
896 'missing: %s') % _(', ').join(sorted(missingreqs)))
900 'missing: %s') % _(', ').join(sorted(missingreqs)))
897
901
898 blockedreqs = blocksourcerequirements(repo) & repo.requirements
902 blockedreqs = blocksourcerequirements(repo) & repo.requirements
899 if blockedreqs:
903 if blockedreqs:
900 raise error.Abort(_('cannot upgrade repository; unsupported source '
904 raise error.Abort(_('cannot upgrade repository; unsupported source '
901 'requirement: %s') %
905 'requirement: %s') %
902 _(', ').join(sorted(blockedreqs)))
906 _(', ').join(sorted(blockedreqs)))
903
907
904 # FUTURE there is potentially a need to control the wanted requirements via
908 # FUTURE there is potentially a need to control the wanted requirements via
905 # command arguments or via an extension hook point.
909 # command arguments or via an extension hook point.
906 newreqs = localrepo.newreporequirements(
910 newreqs = localrepo.newreporequirements(
907 repo.ui, localrepo.defaultcreateopts(repo.ui))
911 repo.ui, localrepo.defaultcreateopts(repo.ui))
908 newreqs.update(preservedrequirements(repo))
912 newreqs.update(preservedrequirements(repo))
909
913
910 noremovereqs = (repo.requirements - newreqs -
914 noremovereqs = (repo.requirements - newreqs -
911 supportremovedrequirements(repo))
915 supportremovedrequirements(repo))
912 if noremovereqs:
916 if noremovereqs:
913 raise error.Abort(_('cannot upgrade repository; requirement would be '
917 raise error.Abort(_('cannot upgrade repository; requirement would be '
914 'removed: %s') % _(', ').join(sorted(noremovereqs)))
918 'removed: %s') % _(', ').join(sorted(noremovereqs)))
915
919
916 noaddreqs = (newreqs - repo.requirements -
920 noaddreqs = (newreqs - repo.requirements -
917 allowednewrequirements(repo))
921 allowednewrequirements(repo))
918 if noaddreqs:
922 if noaddreqs:
919 raise error.Abort(_('cannot upgrade repository; do not support adding '
923 raise error.Abort(_('cannot upgrade repository; do not support adding '
920 'requirement: %s') %
924 'requirement: %s') %
921 _(', ').join(sorted(noaddreqs)))
925 _(', ').join(sorted(noaddreqs)))
922
926
923 unsupportedreqs = newreqs - supporteddestrequirements(repo)
927 unsupportedreqs = newreqs - supporteddestrequirements(repo)
924 if unsupportedreqs:
928 if unsupportedreqs:
925 raise error.Abort(_('cannot upgrade repository; do not support '
929 raise error.Abort(_('cannot upgrade repository; do not support '
926 'destination requirement: %s') %
930 'destination requirement: %s') %
927 _(', ').join(sorted(unsupportedreqs)))
931 _(', ').join(sorted(unsupportedreqs)))
928
932
929 # Find and validate all improvements that can be made.
933 # Find and validate all improvements that can be made.
930 alloptimizations = findoptimizations(repo)
934 alloptimizations = findoptimizations(repo)
931
935
932 # Apply and Validate arguments.
936 # Apply and Validate arguments.
933 optimizations = []
937 optimizations = []
934 for o in alloptimizations:
938 for o in alloptimizations:
935 if o.name in optimize:
939 if o.name in optimize:
936 optimizations.append(o)
940 optimizations.append(o)
937 optimize.discard(o.name)
941 optimize.discard(o.name)
938
942
939 if optimize: # anything left is unknown
943 if optimize: # anything left is unknown
940 raise error.Abort(_('unknown optimization action requested: %s') %
944 raise error.Abort(_('unknown optimization action requested: %s') %
941 ', '.join(sorted(optimize)),
945 ', '.join(sorted(optimize)),
942 hint=_('run without arguments to see valid '
946 hint=_('run without arguments to see valid '
943 'optimizations'))
947 'optimizations'))
944
948
945 deficiencies = finddeficiencies(repo)
949 deficiencies = finddeficiencies(repo)
946 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
950 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
947 actions.extend(o for o in sorted(optimizations)
951 actions.extend(o for o in sorted(optimizations)
948 # determineactions could have added optimisation
952 # determineactions could have added optimisation
949 if o not in actions)
953 if o not in actions)
950
954
951 def printrequirements():
955 def printrequirements():
952 ui.write(_('requirements\n'))
956 ui.write(_('requirements\n'))
953 ui.write(_(' preserved: %s\n') %
957 ui.write(_(' preserved: %s\n') %
954 _(', ').join(sorted(newreqs & repo.requirements)))
958 _(', ').join(sorted(newreqs & repo.requirements)))
955
959
956 if repo.requirements - newreqs:
960 if repo.requirements - newreqs:
957 ui.write(_(' removed: %s\n') %
961 ui.write(_(' removed: %s\n') %
958 _(', ').join(sorted(repo.requirements - newreqs)))
962 _(', ').join(sorted(repo.requirements - newreqs)))
959
963
960 if newreqs - repo.requirements:
964 if newreqs - repo.requirements:
961 ui.write(_(' added: %s\n') %
965 ui.write(_(' added: %s\n') %
962 _(', ').join(sorted(newreqs - repo.requirements)))
966 _(', ').join(sorted(newreqs - repo.requirements)))
963
967
964 ui.write('\n')
968 ui.write('\n')
965
969
966 def printupgradeactions():
970 def printupgradeactions():
967 for a in actions:
971 for a in actions:
968 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
972 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
969
973
970 if not run:
974 if not run:
971 fromconfig = []
975 fromconfig = []
972 onlydefault = []
976 onlydefault = []
973
977
974 for d in deficiencies:
978 for d in deficiencies:
975 if d.fromconfig(repo):
979 if d.fromconfig(repo):
976 fromconfig.append(d)
980 fromconfig.append(d)
977 elif d.default:
981 elif d.default:
978 onlydefault.append(d)
982 onlydefault.append(d)
979
983
980 if fromconfig or onlydefault:
984 if fromconfig or onlydefault:
981
985
982 if fromconfig:
986 if fromconfig:
983 ui.write(_('repository lacks features recommended by '
987 ui.write(_('repository lacks features recommended by '
984 'current config options:\n\n'))
988 'current config options:\n\n'))
985 for i in fromconfig:
989 for i in fromconfig:
986 ui.write('%s\n %s\n\n' % (i.name, i.description))
990 ui.write('%s\n %s\n\n' % (i.name, i.description))
987
991
988 if onlydefault:
992 if onlydefault:
989 ui.write(_('repository lacks features used by the default '
993 ui.write(_('repository lacks features used by the default '
990 'config options:\n\n'))
994 'config options:\n\n'))
991 for i in onlydefault:
995 for i in onlydefault:
992 ui.write('%s\n %s\n\n' % (i.name, i.description))
996 ui.write('%s\n %s\n\n' % (i.name, i.description))
993
997
994 ui.write('\n')
998 ui.write('\n')
995 else:
999 else:
996 ui.write(_('(no feature deficiencies found in existing '
1000 ui.write(_('(no feature deficiencies found in existing '
997 'repository)\n'))
1001 'repository)\n'))
998
1002
999 ui.write(_('performing an upgrade with "--run" will make the following '
1003 ui.write(_('performing an upgrade with "--run" will make the following '
1000 'changes:\n\n'))
1004 'changes:\n\n'))
1001
1005
1002 printrequirements()
1006 printrequirements()
1003 printupgradeactions()
1007 printupgradeactions()
1004
1008
1005 unusedoptimize = [i for i in alloptimizations if i not in actions]
1009 unusedoptimize = [i for i in alloptimizations if i not in actions]
1006
1010
1007 if unusedoptimize:
1011 if unusedoptimize:
1008 ui.write(_('additional optimizations are available by specifying '
1012 ui.write(_('additional optimizations are available by specifying '
1009 '"--optimize <name>":\n\n'))
1013 '"--optimize <name>":\n\n'))
1010 for i in unusedoptimize:
1014 for i in unusedoptimize:
1011 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
1015 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
1012 return
1016 return
1013
1017
1014 # Else we're in the run=true case.
1018 # Else we're in the run=true case.
1015 ui.write(_('upgrade will perform the following actions:\n\n'))
1019 ui.write(_('upgrade will perform the following actions:\n\n'))
1016 printrequirements()
1020 printrequirements()
1017 printupgradeactions()
1021 printupgradeactions()
1018
1022
1019 upgradeactions = [a.name for a in actions]
1023 upgradeactions = [a.name for a in actions]
1020
1024
1021 ui.write(_('beginning upgrade...\n'))
1025 ui.write(_('beginning upgrade...\n'))
1022 with repo.wlock(), repo.lock():
1026 with repo.wlock(), repo.lock():
1023 ui.write(_('repository locked and read-only\n'))
1027 ui.write(_('repository locked and read-only\n'))
1024 # Our strategy for upgrading the repository is to create a new,
1028 # Our strategy for upgrading the repository is to create a new,
1025 # temporary repository, write data to it, then do a swap of the
1029 # temporary repository, write data to it, then do a swap of the
1026 # data. There are less heavyweight ways to do this, but it is easier
1030 # data. There are less heavyweight ways to do this, but it is easier
1027 # to create a new repo object than to instantiate all the components
1031 # to create a new repo object than to instantiate all the components
1028 # (like the store) separately.
1032 # (like the store) separately.
1029 tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
1033 tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
1030 backuppath = None
1034 backuppath = None
1031 try:
1035 try:
1032 ui.write(_('creating temporary repository to stage migrated '
1036 ui.write(_('creating temporary repository to stage migrated '
1033 'data: %s\n') % tmppath)
1037 'data: %s\n') % tmppath)
1034
1038
1035 # clone ui without using ui.copy because repo.ui is protected
1039 # clone ui without using ui.copy because repo.ui is protected
1036 repoui = repo.ui.__class__(repo.ui)
1040 repoui = repo.ui.__class__(repo.ui)
1037 dstrepo = hg.repository(repoui, path=tmppath, create=True)
1041 dstrepo = hg.repository(repoui, path=tmppath, create=True)
1038
1042
1039 with dstrepo.wlock(), dstrepo.lock():
1043 with dstrepo.wlock(), dstrepo.lock():
1040 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
1044 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
1041 upgradeactions, revlogs=revlogs)
1045 upgradeactions, revlogs=revlogs)
1042 if not (backup or backuppath is None):
1046 if not (backup or backuppath is None):
1043 ui.write(_('removing old repository content%s\n') % backuppath)
1047 ui.write(_('removing old repository content%s\n') % backuppath)
1044 repo.vfs.rmtree(backuppath, forcibly=True)
1048 repo.vfs.rmtree(backuppath, forcibly=True)
1045 backuppath = None
1049 backuppath = None
1046
1050
1047 finally:
1051 finally:
1048 ui.write(_('removing temporary repository %s\n') % tmppath)
1052 ui.write(_('removing temporary repository %s\n') % tmppath)
1049 repo.vfs.rmtree(tmppath, forcibly=True)
1053 repo.vfs.rmtree(tmppath, forcibly=True)
1050
1054
1051 if backuppath:
1055 if backuppath:
1052 ui.warn(_('copy of old repository backed up at %s\n') %
1056 ui.warn(_('copy of old repository backed up at %s\n') %
1053 backuppath)
1057 backuppath)
1054 ui.warn(_('the old repository will not be deleted; remove '
1058 ui.warn(_('the old repository will not be deleted; remove '
1055 'it to free up disk space once the upgraded '
1059 'it to free up disk space once the upgraded '
1056 'repository is verified\n'))
1060 'repository is verified\n'))
@@ -1,423 +1,423
1 Show all commands except debug commands
1 Show all commands except debug commands
2 $ hg debugcomplete
2 $ hg debugcomplete
3 abort
3 abort
4 add
4 add
5 addremove
5 addremove
6 annotate
6 annotate
7 archive
7 archive
8 backout
8 backout
9 bisect
9 bisect
10 bookmarks
10 bookmarks
11 branch
11 branch
12 branches
12 branches
13 bundle
13 bundle
14 cat
14 cat
15 clone
15 clone
16 commit
16 commit
17 config
17 config
18 continue
18 continue
19 copy
19 copy
20 diff
20 diff
21 export
21 export
22 files
22 files
23 forget
23 forget
24 graft
24 graft
25 grep
25 grep
26 heads
26 heads
27 help
27 help
28 identify
28 identify
29 import
29 import
30 incoming
30 incoming
31 init
31 init
32 locate
32 locate
33 log
33 log
34 manifest
34 manifest
35 merge
35 merge
36 outgoing
36 outgoing
37 parents
37 parents
38 paths
38 paths
39 phase
39 phase
40 pull
40 pull
41 push
41 push
42 recover
42 recover
43 remove
43 remove
44 rename
44 rename
45 resolve
45 resolve
46 revert
46 revert
47 rollback
47 rollback
48 root
48 root
49 serve
49 serve
50 shelve
50 shelve
51 status
51 status
52 summary
52 summary
53 tag
53 tag
54 tags
54 tags
55 tip
55 tip
56 unbundle
56 unbundle
57 unshelve
57 unshelve
58 update
58 update
59 verify
59 verify
60 version
60 version
61
61
62 Show all commands that start with "a"
62 Show all commands that start with "a"
63 $ hg debugcomplete a
63 $ hg debugcomplete a
64 abort
64 abort
65 add
65 add
66 addremove
66 addremove
67 annotate
67 annotate
68 archive
68 archive
69
69
70 Do not show debug commands if there are other candidates
70 Do not show debug commands if there are other candidates
71 $ hg debugcomplete d
71 $ hg debugcomplete d
72 diff
72 diff
73
73
74 Show debug commands if there are no other candidates
74 Show debug commands if there are no other candidates
75 $ hg debugcomplete debug
75 $ hg debugcomplete debug
76 debugancestor
76 debugancestor
77 debugapplystreamclonebundle
77 debugapplystreamclonebundle
78 debugbuilddag
78 debugbuilddag
79 debugbundle
79 debugbundle
80 debugcapabilities
80 debugcapabilities
81 debugcheckstate
81 debugcheckstate
82 debugcolor
82 debugcolor
83 debugcommands
83 debugcommands
84 debugcomplete
84 debugcomplete
85 debugconfig
85 debugconfig
86 debugcreatestreamclonebundle
86 debugcreatestreamclonebundle
87 debugdag
87 debugdag
88 debugdata
88 debugdata
89 debugdate
89 debugdate
90 debugdeltachain
90 debugdeltachain
91 debugdirstate
91 debugdirstate
92 debugdiscovery
92 debugdiscovery
93 debugdownload
93 debugdownload
94 debugextensions
94 debugextensions
95 debugfileset
95 debugfileset
96 debugformat
96 debugformat
97 debugfsinfo
97 debugfsinfo
98 debuggetbundle
98 debuggetbundle
99 debugignore
99 debugignore
100 debugindex
100 debugindex
101 debugindexdot
101 debugindexdot
102 debugindexstats
102 debugindexstats
103 debuginstall
103 debuginstall
104 debugknown
104 debugknown
105 debuglabelcomplete
105 debuglabelcomplete
106 debuglocks
106 debuglocks
107 debugmanifestfulltextcache
107 debugmanifestfulltextcache
108 debugmergestate
108 debugmergestate
109 debugnamecomplete
109 debugnamecomplete
110 debugobsolete
110 debugobsolete
111 debugp1copies
111 debugp1copies
112 debugp2copies
112 debugp2copies
113 debugpathcomplete
113 debugpathcomplete
114 debugpathcopies
114 debugpathcopies
115 debugpeer
115 debugpeer
116 debugpickmergetool
116 debugpickmergetool
117 debugpushkey
117 debugpushkey
118 debugpvec
118 debugpvec
119 debugrebuilddirstate
119 debugrebuilddirstate
120 debugrebuildfncache
120 debugrebuildfncache
121 debugrename
121 debugrename
122 debugrevlog
122 debugrevlog
123 debugrevlogindex
123 debugrevlogindex
124 debugrevspec
124 debugrevspec
125 debugserve
125 debugserve
126 debugsetparents
126 debugsetparents
127 debugssl
127 debugssl
128 debugsub
128 debugsub
129 debugsuccessorssets
129 debugsuccessorssets
130 debugtemplate
130 debugtemplate
131 debuguigetpass
131 debuguigetpass
132 debuguiprompt
132 debuguiprompt
133 debugupdatecaches
133 debugupdatecaches
134 debugupgraderepo
134 debugupgraderepo
135 debugwalk
135 debugwalk
136 debugwhyunstable
136 debugwhyunstable
137 debugwireargs
137 debugwireargs
138 debugwireproto
138 debugwireproto
139
139
140 Do not show the alias of a debug command if there are other candidates
140 Do not show the alias of a debug command if there are other candidates
141 (this should hide rawcommit)
141 (this should hide rawcommit)
142 $ hg debugcomplete r
142 $ hg debugcomplete r
143 recover
143 recover
144 remove
144 remove
145 rename
145 rename
146 resolve
146 resolve
147 revert
147 revert
148 rollback
148 rollback
149 root
149 root
150 Show the alias of a debug command if there are no other candidates
150 Show the alias of a debug command if there are no other candidates
151 $ hg debugcomplete rawc
151 $ hg debugcomplete rawc
152
152
153
153
154 Show the global options
154 Show the global options
155 $ hg debugcomplete --options | sort
155 $ hg debugcomplete --options | sort
156 --color
156 --color
157 --config
157 --config
158 --cwd
158 --cwd
159 --debug
159 --debug
160 --debugger
160 --debugger
161 --encoding
161 --encoding
162 --encodingmode
162 --encodingmode
163 --help
163 --help
164 --hidden
164 --hidden
165 --noninteractive
165 --noninteractive
166 --pager
166 --pager
167 --profile
167 --profile
168 --quiet
168 --quiet
169 --repository
169 --repository
170 --time
170 --time
171 --traceback
171 --traceback
172 --verbose
172 --verbose
173 --version
173 --version
174 -R
174 -R
175 -h
175 -h
176 -q
176 -q
177 -v
177 -v
178 -y
178 -y
179
179
180 Show the options for the "serve" command
180 Show the options for the "serve" command
181 $ hg debugcomplete --options serve | sort
181 $ hg debugcomplete --options serve | sort
182 --accesslog
182 --accesslog
183 --address
183 --address
184 --certificate
184 --certificate
185 --cmdserver
185 --cmdserver
186 --color
186 --color
187 --config
187 --config
188 --cwd
188 --cwd
189 --daemon
189 --daemon
190 --daemon-postexec
190 --daemon-postexec
191 --debug
191 --debug
192 --debugger
192 --debugger
193 --encoding
193 --encoding
194 --encodingmode
194 --encodingmode
195 --errorlog
195 --errorlog
196 --help
196 --help
197 --hidden
197 --hidden
198 --ipv6
198 --ipv6
199 --name
199 --name
200 --noninteractive
200 --noninteractive
201 --pager
201 --pager
202 --pid-file
202 --pid-file
203 --port
203 --port
204 --prefix
204 --prefix
205 --print-url
205 --print-url
206 --profile
206 --profile
207 --quiet
207 --quiet
208 --repository
208 --repository
209 --stdio
209 --stdio
210 --style
210 --style
211 --subrepos
211 --subrepos
212 --templates
212 --templates
213 --time
213 --time
214 --traceback
214 --traceback
215 --verbose
215 --verbose
216 --version
216 --version
217 --web-conf
217 --web-conf
218 -6
218 -6
219 -A
219 -A
220 -E
220 -E
221 -R
221 -R
222 -S
222 -S
223 -a
223 -a
224 -d
224 -d
225 -h
225 -h
226 -n
226 -n
227 -p
227 -p
228 -q
228 -q
229 -t
229 -t
230 -v
230 -v
231 -y
231 -y
232
232
233 Show an error if we use --options with an ambiguous abbreviation
233 Show an error if we use --options with an ambiguous abbreviation
234 $ hg debugcomplete --options s
234 $ hg debugcomplete --options s
235 hg: command 's' is ambiguous:
235 hg: command 's' is ambiguous:
236 serve shelve showconfig status summary
236 serve shelve showconfig status summary
237 [255]
237 [255]
238
238
239 Show all commands + options
239 Show all commands + options
240 $ hg debugcommands
240 $ hg debugcommands
241 abort: dry-run
241 abort: dry-run
242 add: include, exclude, subrepos, dry-run
242 add: include, exclude, subrepos, dry-run
243 addremove: similarity, subrepos, include, exclude, dry-run
243 addremove: similarity, subrepos, include, exclude, dry-run
244 annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, skip, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, include, exclude, template
244 annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, skip, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, include, exclude, template
245 archive: no-decode, prefix, rev, type, subrepos, include, exclude
245 archive: no-decode, prefix, rev, type, subrepos, include, exclude
246 backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
246 backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
247 bisect: reset, good, bad, skip, extend, command, noupdate
247 bisect: reset, good, bad, skip, extend, command, noupdate
248 bookmarks: force, rev, delete, rename, inactive, list, template
248 bookmarks: force, rev, delete, rename, inactive, list, template
249 branch: force, clean, rev
249 branch: force, clean, rev
250 branches: active, closed, rev, template
250 branches: active, closed, rev, template
251 bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
251 bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
252 cat: output, rev, decode, include, exclude, template
252 cat: output, rev, decode, include, exclude, template
253 clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
253 clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
254 commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
254 commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
255 config: untrusted, edit, local, global, template
255 config: untrusted, edit, local, global, template
256 continue: dry-run
256 continue: dry-run
257 copy: after, force, include, exclude, dry-run
257 copy: after, force, include, exclude, dry-run
258 debugancestor:
258 debugancestor:
259 debugapplystreamclonebundle:
259 debugapplystreamclonebundle:
260 debugbuilddag: mergeable-file, overwritten-file, new-file
260 debugbuilddag: mergeable-file, overwritten-file, new-file
261 debugbundle: all, part-type, spec
261 debugbundle: all, part-type, spec
262 debugcapabilities:
262 debugcapabilities:
263 debugcheckstate:
263 debugcheckstate:
264 debugcolor: style
264 debugcolor: style
265 debugcommands:
265 debugcommands:
266 debugcomplete: options
266 debugcomplete: options
267 debugcreatestreamclonebundle:
267 debugcreatestreamclonebundle:
268 debugdag: tags, branches, dots, spaces
268 debugdag: tags, branches, dots, spaces
269 debugdata: changelog, manifest, dir
269 debugdata: changelog, manifest, dir
270 debugdate: extended
270 debugdate: extended
271 debugdeltachain: changelog, manifest, dir, template
271 debugdeltachain: changelog, manifest, dir, template
272 debugdirstate: nodates, dates, datesort
272 debugdirstate: nodates, dates, datesort
273 debugdiscovery: old, nonheads, rev, seed, ssh, remotecmd, insecure
273 debugdiscovery: old, nonheads, rev, seed, ssh, remotecmd, insecure
274 debugdownload: output
274 debugdownload: output
275 debugextensions: template
275 debugextensions: template
276 debugfileset: rev, all-files, show-matcher, show-stage
276 debugfileset: rev, all-files, show-matcher, show-stage
277 debugformat: template
277 debugformat: template
278 debugfsinfo:
278 debugfsinfo:
279 debuggetbundle: head, common, type
279 debuggetbundle: head, common, type
280 debugignore:
280 debugignore:
281 debugindex: changelog, manifest, dir, template
281 debugindex: changelog, manifest, dir, template
282 debugindexdot: changelog, manifest, dir
282 debugindexdot: changelog, manifest, dir
283 debugindexstats:
283 debugindexstats:
284 debuginstall: template
284 debuginstall: template
285 debugknown:
285 debugknown:
286 debuglabelcomplete:
286 debuglabelcomplete:
287 debuglocks: force-lock, force-wlock, set-lock, set-wlock
287 debuglocks: force-lock, force-wlock, set-lock, set-wlock
288 debugmanifestfulltextcache: clear, add
288 debugmanifestfulltextcache: clear, add
289 debugmergestate:
289 debugmergestate:
290 debugnamecomplete:
290 debugnamecomplete:
291 debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
291 debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
292 debugp1copies: rev
292 debugp1copies: rev
293 debugp2copies: rev
293 debugp2copies: rev
294 debugpathcomplete: full, normal, added, removed
294 debugpathcomplete: full, normal, added, removed
295 debugpathcopies: include, exclude
295 debugpathcopies: include, exclude
296 debugpeer:
296 debugpeer:
297 debugpickmergetool: rev, changedelete, include, exclude, tool
297 debugpickmergetool: rev, changedelete, include, exclude, tool
298 debugpushkey:
298 debugpushkey:
299 debugpvec:
299 debugpvec:
300 debugrebuilddirstate: rev, minimal
300 debugrebuilddirstate: rev, minimal
301 debugrebuildfncache:
301 debugrebuildfncache:
302 debugrename: rev
302 debugrename: rev
303 debugrevlog: changelog, manifest, dir, dump
303 debugrevlog: changelog, manifest, dir, dump
304 debugrevlogindex: changelog, manifest, dir, format
304 debugrevlogindex: changelog, manifest, dir, format
305 debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
305 debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
306 debugserve: sshstdio, logiofd, logiofile
306 debugserve: sshstdio, logiofd, logiofile
307 debugsetparents:
307 debugsetparents:
308 debugssl:
308 debugssl:
309 debugsub: rev
309 debugsub: rev
310 debugsuccessorssets: closest
310 debugsuccessorssets: closest
311 debugtemplate: rev, define
311 debugtemplate: rev, define
312 debuguigetpass: prompt
312 debuguigetpass: prompt
313 debuguiprompt: prompt
313 debuguiprompt: prompt
314 debugupdatecaches:
314 debugupdatecaches:
315 debugupgraderepo: optimize, run, backup, manifest
315 debugupgraderepo: optimize, run, backup, changelog, manifest
316 debugwalk: include, exclude
316 debugwalk: include, exclude
317 debugwhyunstable:
317 debugwhyunstable:
318 debugwireargs: three, four, five, ssh, remotecmd, insecure
318 debugwireargs: three, four, five, ssh, remotecmd, insecure
319 debugwireproto: localssh, peer, noreadstderr, nologhandshake, ssh, remotecmd, insecure
319 debugwireproto: localssh, peer, noreadstderr, nologhandshake, ssh, remotecmd, insecure
320 diff: rev, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos
320 diff: rev, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos
321 export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template
321 export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template
322 files: rev, print0, include, exclude, template, subrepos
322 files: rev, print0, include, exclude, template, subrepos
323 forget: interactive, include, exclude, dry-run
323 forget: interactive, include, exclude, dry-run
324 graft: rev, base, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run
324 graft: rev, base, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run
325 grep: print0, all, diff, text, follow, ignore-case, files-with-matches, line-number, rev, all-files, user, date, template, include, exclude
325 grep: print0, all, diff, text, follow, ignore-case, files-with-matches, line-number, rev, all-files, user, date, template, include, exclude
326 heads: rev, topo, active, closed, style, template
326 heads: rev, topo, active, closed, style, template
327 help: extension, command, keyword, system
327 help: extension, command, keyword, system
328 identify: rev, num, id, branch, tags, bookmarks, ssh, remotecmd, insecure, template
328 identify: rev, num, id, branch, tags, bookmarks, ssh, remotecmd, insecure, template
329 import: strip, base, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity
329 import: strip, base, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity
330 incoming: force, newest-first, bundle, rev, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
330 incoming: force, newest-first, bundle, rev, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
331 init: ssh, remotecmd, insecure
331 init: ssh, remotecmd, insecure
332 locate: rev, print0, fullpath, include, exclude
332 locate: rev, print0, fullpath, include, exclude
333 log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
333 log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
334 manifest: rev, all, template
334 manifest: rev, all, template
335 merge: force, rev, preview, abort, tool
335 merge: force, rev, preview, abort, tool
336 outgoing: force, rev, newest-first, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
336 outgoing: force, rev, newest-first, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
337 parents: rev, style, template
337 parents: rev, style, template
338 paths: template
338 paths: template
339 phase: public, draft, secret, force, rev
339 phase: public, draft, secret, force, rev
340 pull: update, force, rev, bookmark, branch, ssh, remotecmd, insecure
340 pull: update, force, rev, bookmark, branch, ssh, remotecmd, insecure
341 push: force, rev, bookmark, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
341 push: force, rev, bookmark, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
342 recover: verify
342 recover: verify
343 remove: after, force, subrepos, include, exclude, dry-run
343 remove: after, force, subrepos, include, exclude, dry-run
344 rename: after, force, include, exclude, dry-run
344 rename: after, force, include, exclude, dry-run
345 resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
345 resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
346 revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
346 revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
347 rollback: dry-run, force
347 rollback: dry-run, force
348 root: template
348 root: template
349 serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, print-url, subrepos
349 serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, print-url, subrepos
350 shelve: addremove, unknown, cleanup, date, delete, edit, keep, list, message, name, patch, interactive, stat, include, exclude
350 shelve: addremove, unknown, cleanup, date, delete, edit, keep, list, message, name, patch, interactive, stat, include, exclude
351 status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, terse, copies, print0, rev, change, include, exclude, subrepos, template
351 status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, terse, copies, print0, rev, change, include, exclude, subrepos, template
352 summary: remote
352 summary: remote
353 tag: force, local, rev, remove, edit, message, date, user
353 tag: force, local, rev, remove, edit, message, date, user
354 tags: template
354 tags: template
355 tip: patch, git, style, template
355 tip: patch, git, style, template
356 unbundle: update
356 unbundle: update
357 unshelve: abort, continue, interactive, keep, name, tool, date
357 unshelve: abort, continue, interactive, keep, name, tool, date
358 update: clean, check, merge, date, rev, tool
358 update: clean, check, merge, date, rev, tool
359 verify: full
359 verify: full
360 version: template
360 version: template
361
361
362 $ hg init a
362 $ hg init a
363 $ cd a
363 $ cd a
364 $ echo fee > fee
364 $ echo fee > fee
365 $ hg ci -q -Amfee
365 $ hg ci -q -Amfee
366 $ hg tag fee
366 $ hg tag fee
367 $ mkdir fie
367 $ mkdir fie
368 $ echo dead > fie/dead
368 $ echo dead > fie/dead
369 $ echo live > fie/live
369 $ echo live > fie/live
370 $ hg bookmark fo
370 $ hg bookmark fo
371 $ hg branch -q fie
371 $ hg branch -q fie
372 $ hg ci -q -Amfie
372 $ hg ci -q -Amfie
373 $ echo fo > fo
373 $ echo fo > fo
374 $ hg branch -qf default
374 $ hg branch -qf default
375 $ hg ci -q -Amfo
375 $ hg ci -q -Amfo
376 $ echo Fum > Fum
376 $ echo Fum > Fum
377 $ hg ci -q -AmFum
377 $ hg ci -q -AmFum
378 $ hg bookmark Fum
378 $ hg bookmark Fum
379
379
380 Test debugpathcomplete
380 Test debugpathcomplete
381
381
382 $ hg debugpathcomplete f
382 $ hg debugpathcomplete f
383 fee
383 fee
384 fie
384 fie
385 fo
385 fo
386 $ hg debugpathcomplete -f f
386 $ hg debugpathcomplete -f f
387 fee
387 fee
388 fie/dead
388 fie/dead
389 fie/live
389 fie/live
390 fo
390 fo
391
391
392 $ hg rm Fum
392 $ hg rm Fum
393 $ hg debugpathcomplete -r F
393 $ hg debugpathcomplete -r F
394 Fum
394 Fum
395
395
396 Test debugnamecomplete
396 Test debugnamecomplete
397
397
398 $ hg debugnamecomplete
398 $ hg debugnamecomplete
399 Fum
399 Fum
400 default
400 default
401 fee
401 fee
402 fie
402 fie
403 fo
403 fo
404 tip
404 tip
405 $ hg debugnamecomplete f
405 $ hg debugnamecomplete f
406 fee
406 fee
407 fie
407 fie
408 fo
408 fo
409
409
410 Test debuglabelcomplete, a deprecated name for debugnamecomplete that is still
410 Test debuglabelcomplete, a deprecated name for debugnamecomplete that is still
411 used for completions in some shells.
411 used for completions in some shells.
412
412
413 $ hg debuglabelcomplete
413 $ hg debuglabelcomplete
414 Fum
414 Fum
415 default
415 default
416 fee
416 fee
417 fie
417 fie
418 fo
418 fo
419 tip
419 tip
420 $ hg debuglabelcomplete f
420 $ hg debuglabelcomplete f
421 fee
421 fee
422 fie
422 fie
423 fo
423 fo
@@ -1,1048 +1,1140
1 #require no-reposimplestore
1 #require no-reposimplestore
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > share =
5 > share =
6 > EOF
6 > EOF
7
7
8 store and revlogv1 are required in source
8 store and revlogv1 are required in source
9
9
10 $ hg --config format.usestore=false init no-store
10 $ hg --config format.usestore=false init no-store
11 $ hg -R no-store debugupgraderepo
11 $ hg -R no-store debugupgraderepo
12 abort: cannot upgrade repository; requirement missing: store
12 abort: cannot upgrade repository; requirement missing: store
13 [255]
13 [255]
14
14
15 $ hg init no-revlogv1
15 $ hg init no-revlogv1
16 $ cat > no-revlogv1/.hg/requires << EOF
16 $ cat > no-revlogv1/.hg/requires << EOF
17 > dotencode
17 > dotencode
18 > fncache
18 > fncache
19 > generaldelta
19 > generaldelta
20 > store
20 > store
21 > EOF
21 > EOF
22
22
23 $ hg -R no-revlogv1 debugupgraderepo
23 $ hg -R no-revlogv1 debugupgraderepo
24 abort: cannot upgrade repository; requirement missing: revlogv1
24 abort: cannot upgrade repository; requirement missing: revlogv1
25 [255]
25 [255]
26
26
27 Cannot upgrade shared repositories
27 Cannot upgrade shared repositories
28
28
29 $ hg init share-parent
29 $ hg init share-parent
30 $ hg -q share share-parent share-child
30 $ hg -q share share-parent share-child
31
31
32 $ hg -R share-child debugupgraderepo
32 $ hg -R share-child debugupgraderepo
33 abort: cannot upgrade repository; unsupported source requirement: shared
33 abort: cannot upgrade repository; unsupported source requirement: shared
34 [255]
34 [255]
35
35
36 Do not yet support upgrading treemanifest repos
36 Do not yet support upgrading treemanifest repos
37
37
38 $ hg --config experimental.treemanifest=true init treemanifest
38 $ hg --config experimental.treemanifest=true init treemanifest
39 $ hg -R treemanifest debugupgraderepo
39 $ hg -R treemanifest debugupgraderepo
40 abort: cannot upgrade repository; unsupported source requirement: treemanifest
40 abort: cannot upgrade repository; unsupported source requirement: treemanifest
41 [255]
41 [255]
42
42
43 Cannot add treemanifest requirement during upgrade
43 Cannot add treemanifest requirement during upgrade
44
44
45 $ hg init disallowaddedreq
45 $ hg init disallowaddedreq
46 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
46 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
47 abort: cannot upgrade repository; do not support adding requirement: treemanifest
47 abort: cannot upgrade repository; do not support adding requirement: treemanifest
48 [255]
48 [255]
49
49
50 An upgrade of a repository created with recommended settings only suggests optimizations
50 An upgrade of a repository created with recommended settings only suggests optimizations
51
51
52 $ hg init empty
52 $ hg init empty
53 $ cd empty
53 $ cd empty
54 $ hg debugformat
54 $ hg debugformat
55 format-variant repo
55 format-variant repo
56 fncache: yes
56 fncache: yes
57 dotencode: yes
57 dotencode: yes
58 generaldelta: yes
58 generaldelta: yes
59 sparserevlog: yes
59 sparserevlog: yes
60 plain-cl-delta: yes
60 plain-cl-delta: yes
61 compression: zlib
61 compression: zlib
62 compression-level: default
62 compression-level: default
63 $ hg debugformat --verbose
63 $ hg debugformat --verbose
64 format-variant repo config default
64 format-variant repo config default
65 fncache: yes yes yes
65 fncache: yes yes yes
66 dotencode: yes yes yes
66 dotencode: yes yes yes
67 generaldelta: yes yes yes
67 generaldelta: yes yes yes
68 sparserevlog: yes yes yes
68 sparserevlog: yes yes yes
69 plain-cl-delta: yes yes yes
69 plain-cl-delta: yes yes yes
70 compression: zlib zlib zlib
70 compression: zlib zlib zlib
71 compression-level: default default default
71 compression-level: default default default
72 $ hg debugformat --verbose --config format.usefncache=no
72 $ hg debugformat --verbose --config format.usefncache=no
73 format-variant repo config default
73 format-variant repo config default
74 fncache: yes no yes
74 fncache: yes no yes
75 dotencode: yes no yes
75 dotencode: yes no yes
76 generaldelta: yes yes yes
76 generaldelta: yes yes yes
77 sparserevlog: yes yes yes
77 sparserevlog: yes yes yes
78 plain-cl-delta: yes yes yes
78 plain-cl-delta: yes yes yes
79 compression: zlib zlib zlib
79 compression: zlib zlib zlib
80 compression-level: default default default
80 compression-level: default default default
81 $ hg debugformat --verbose --config format.usefncache=no --color=debug
81 $ hg debugformat --verbose --config format.usefncache=no --color=debug
82 format-variant repo config default
82 format-variant repo config default
83 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
83 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
84 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
84 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
85 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
85 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
86 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
86 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
87 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
87 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
88 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
88 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
89 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
89 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
90 $ hg debugformat -Tjson
90 $ hg debugformat -Tjson
91 [
91 [
92 {
92 {
93 "config": true,
93 "config": true,
94 "default": true,
94 "default": true,
95 "name": "fncache",
95 "name": "fncache",
96 "repo": true
96 "repo": true
97 },
97 },
98 {
98 {
99 "config": true,
99 "config": true,
100 "default": true,
100 "default": true,
101 "name": "dotencode",
101 "name": "dotencode",
102 "repo": true
102 "repo": true
103 },
103 },
104 {
104 {
105 "config": true,
105 "config": true,
106 "default": true,
106 "default": true,
107 "name": "generaldelta",
107 "name": "generaldelta",
108 "repo": true
108 "repo": true
109 },
109 },
110 {
110 {
111 "config": true,
111 "config": true,
112 "default": true,
112 "default": true,
113 "name": "sparserevlog",
113 "name": "sparserevlog",
114 "repo": true
114 "repo": true
115 },
115 },
116 {
116 {
117 "config": true,
117 "config": true,
118 "default": true,
118 "default": true,
119 "name": "plain-cl-delta",
119 "name": "plain-cl-delta",
120 "repo": true
120 "repo": true
121 },
121 },
122 {
122 {
123 "config": "zlib",
123 "config": "zlib",
124 "default": "zlib",
124 "default": "zlib",
125 "name": "compression",
125 "name": "compression",
126 "repo": "zlib"
126 "repo": "zlib"
127 },
127 },
128 {
128 {
129 "config": "default",
129 "config": "default",
130 "default": "default",
130 "default": "default",
131 "name": "compression-level",
131 "name": "compression-level",
132 "repo": "default"
132 "repo": "default"
133 }
133 }
134 ]
134 ]
135 $ hg debugupgraderepo
135 $ hg debugupgraderepo
136 (no feature deficiencies found in existing repository)
136 (no feature deficiencies found in existing repository)
137 performing an upgrade with "--run" will make the following changes:
137 performing an upgrade with "--run" will make the following changes:
138
138
139 requirements
139 requirements
140 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
140 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
141
141
142 additional optimizations are available by specifying "--optimize <name>":
142 additional optimizations are available by specifying "--optimize <name>":
143
143
144 re-delta-parent
144 re-delta-parent
145 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
145 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
146
146
147 re-delta-multibase
147 re-delta-multibase
148 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
148 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
149
149
150 re-delta-all
150 re-delta-all
151 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
151 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
152
152
153 re-delta-fulladd
153 re-delta-fulladd
154 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
154 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
155
155
156
156
157 --optimize can be used to add optimizations
157 --optimize can be used to add optimizations
158
158
159 $ hg debugupgrade --optimize redeltaparent
159 $ hg debugupgrade --optimize redeltaparent
160 (no feature deficiencies found in existing repository)
160 (no feature deficiencies found in existing repository)
161 performing an upgrade with "--run" will make the following changes:
161 performing an upgrade with "--run" will make the following changes:
162
162
163 requirements
163 requirements
164 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
164 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
165
165
166 re-delta-parent
166 re-delta-parent
167 deltas within internal storage will choose a new base revision if needed
167 deltas within internal storage will choose a new base revision if needed
168
168
169 additional optimizations are available by specifying "--optimize <name>":
169 additional optimizations are available by specifying "--optimize <name>":
170
170
171 re-delta-multibase
171 re-delta-multibase
172 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
172 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
173
173
174 re-delta-all
174 re-delta-all
175 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
175 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
176
176
177 re-delta-fulladd
177 re-delta-fulladd
178 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
178 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
179
179
180
180
181 modern form of the option
181 modern form of the option
182
182
183 $ hg debugupgrade --optimize re-delta-parent
183 $ hg debugupgrade --optimize re-delta-parent
184 (no feature deficiencies found in existing repository)
184 (no feature deficiencies found in existing repository)
185 performing an upgrade with "--run" will make the following changes:
185 performing an upgrade with "--run" will make the following changes:
186
186
187 requirements
187 requirements
188 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
188 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
189
189
190 re-delta-parent
190 re-delta-parent
191 deltas within internal storage will choose a new base revision if needed
191 deltas within internal storage will choose a new base revision if needed
192
192
193 additional optimizations are available by specifying "--optimize <name>":
193 additional optimizations are available by specifying "--optimize <name>":
194
194
195 re-delta-multibase
195 re-delta-multibase
196 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
196 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
197
197
198 re-delta-all
198 re-delta-all
199 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
199 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
200
200
201 re-delta-fulladd
201 re-delta-fulladd
202 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
202 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
203
203
204
204
205 unknown optimization:
205 unknown optimization:
206
206
207 $ hg debugupgrade --optimize foobar
207 $ hg debugupgrade --optimize foobar
208 abort: unknown optimization action requested: foobar
208 abort: unknown optimization action requested: foobar
209 (run without arguments to see valid optimizations)
209 (run without arguments to see valid optimizations)
210 [255]
210 [255]
211
211
212 Various sub-optimal detections work
212 Various sub-optimal detections work
213
213
214 $ cat > .hg/requires << EOF
214 $ cat > .hg/requires << EOF
215 > revlogv1
215 > revlogv1
216 > store
216 > store
217 > EOF
217 > EOF
218
218
219 $ hg debugformat
219 $ hg debugformat
220 format-variant repo
220 format-variant repo
221 fncache: no
221 fncache: no
222 dotencode: no
222 dotencode: no
223 generaldelta: no
223 generaldelta: no
224 sparserevlog: no
224 sparserevlog: no
225 plain-cl-delta: yes
225 plain-cl-delta: yes
226 compression: zlib
226 compression: zlib
227 compression-level: default
227 compression-level: default
228 $ hg debugformat --verbose
228 $ hg debugformat --verbose
229 format-variant repo config default
229 format-variant repo config default
230 fncache: no yes yes
230 fncache: no yes yes
231 dotencode: no yes yes
231 dotencode: no yes yes
232 generaldelta: no yes yes
232 generaldelta: no yes yes
233 sparserevlog: no yes yes
233 sparserevlog: no yes yes
234 plain-cl-delta: yes yes yes
234 plain-cl-delta: yes yes yes
235 compression: zlib zlib zlib
235 compression: zlib zlib zlib
236 compression-level: default default default
236 compression-level: default default default
237 $ hg debugformat --verbose --config format.usegeneraldelta=no
237 $ hg debugformat --verbose --config format.usegeneraldelta=no
238 format-variant repo config default
238 format-variant repo config default
239 fncache: no yes yes
239 fncache: no yes yes
240 dotencode: no yes yes
240 dotencode: no yes yes
241 generaldelta: no no yes
241 generaldelta: no no yes
242 sparserevlog: no no yes
242 sparserevlog: no no yes
243 plain-cl-delta: yes yes yes
243 plain-cl-delta: yes yes yes
244 compression: zlib zlib zlib
244 compression: zlib zlib zlib
245 compression-level: default default default
245 compression-level: default default default
246 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
246 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
247 format-variant repo config default
247 format-variant repo config default
248 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
248 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
249 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
249 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
250 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
250 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
251 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
251 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
252 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
252 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
253 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
253 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
254 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
254 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
255 $ hg debugupgraderepo
255 $ hg debugupgraderepo
256 repository lacks features recommended by current config options:
256 repository lacks features recommended by current config options:
257
257
258 fncache
258 fncache
259 long and reserved filenames may not work correctly; repository performance is sub-optimal
259 long and reserved filenames may not work correctly; repository performance is sub-optimal
260
260
261 dotencode
261 dotencode
262 storage of filenames beginning with a period or space may not work correctly
262 storage of filenames beginning with a period or space may not work correctly
263
263
264 generaldelta
264 generaldelta
265 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
265 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
266
266
267 sparserevlog
267 sparserevlog
268 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
268 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
269
269
270
270
271 performing an upgrade with "--run" will make the following changes:
271 performing an upgrade with "--run" will make the following changes:
272
272
273 requirements
273 requirements
274 preserved: revlogv1, store
274 preserved: revlogv1, store
275 added: dotencode, fncache, generaldelta, sparserevlog
275 added: dotencode, fncache, generaldelta, sparserevlog
276
276
277 fncache
277 fncache
278 repository will be more resilient to storing certain paths and performance of certain operations should be improved
278 repository will be more resilient to storing certain paths and performance of certain operations should be improved
279
279
280 dotencode
280 dotencode
281 repository will be better able to store files beginning with a space or period
281 repository will be better able to store files beginning with a space or period
282
282
283 generaldelta
283 generaldelta
284 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
284 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
285
285
286 sparserevlog
286 sparserevlog
287 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
287 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
288
288
289 additional optimizations are available by specifying "--optimize <name>":
289 additional optimizations are available by specifying "--optimize <name>":
290
290
291 re-delta-parent
291 re-delta-parent
292 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
292 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
293
293
294 re-delta-multibase
294 re-delta-multibase
295 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
295 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
296
296
297 re-delta-all
297 re-delta-all
298 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
298 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
299
299
300 re-delta-fulladd
300 re-delta-fulladd
301 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
301 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
302
302
303
303
304 $ hg --config format.dotencode=false debugupgraderepo
304 $ hg --config format.dotencode=false debugupgraderepo
305 repository lacks features recommended by current config options:
305 repository lacks features recommended by current config options:
306
306
307 fncache
307 fncache
308 long and reserved filenames may not work correctly; repository performance is sub-optimal
308 long and reserved filenames may not work correctly; repository performance is sub-optimal
309
309
310 generaldelta
310 generaldelta
311 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
311 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
312
312
313 sparserevlog
313 sparserevlog
314 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
314 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
315
315
316 repository lacks features used by the default config options:
316 repository lacks features used by the default config options:
317
317
318 dotencode
318 dotencode
319 storage of filenames beginning with a period or space may not work correctly
319 storage of filenames beginning with a period or space may not work correctly
320
320
321
321
322 performing an upgrade with "--run" will make the following changes:
322 performing an upgrade with "--run" will make the following changes:
323
323
324 requirements
324 requirements
325 preserved: revlogv1, store
325 preserved: revlogv1, store
326 added: fncache, generaldelta, sparserevlog
326 added: fncache, generaldelta, sparserevlog
327
327
328 fncache
328 fncache
329 repository will be more resilient to storing certain paths and performance of certain operations should be improved
329 repository will be more resilient to storing certain paths and performance of certain operations should be improved
330
330
331 generaldelta
331 generaldelta
332 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
332 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
333
333
334 sparserevlog
334 sparserevlog
335 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
335 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
336
336
337 additional optimizations are available by specifying "--optimize <name>":
337 additional optimizations are available by specifying "--optimize <name>":
338
338
339 re-delta-parent
339 re-delta-parent
340 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
340 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
341
341
342 re-delta-multibase
342 re-delta-multibase
343 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
343 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
344
344
345 re-delta-all
345 re-delta-all
346 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
346 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
347
347
348 re-delta-fulladd
348 re-delta-fulladd
349 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
349 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
350
350
351
351
352 $ cd ..
352 $ cd ..
353
353
354 Upgrading a repository that is already modern essentially no-ops
354 Upgrading a repository that is already modern essentially no-ops
355
355
356 $ hg init modern
356 $ hg init modern
357 $ hg -R modern debugupgraderepo --run
357 $ hg -R modern debugupgraderepo --run
358 upgrade will perform the following actions:
358 upgrade will perform the following actions:
359
359
360 requirements
360 requirements
361 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
361 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
362
362
363 beginning upgrade...
363 beginning upgrade...
364 repository locked and read-only
364 repository locked and read-only
365 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
365 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
366 (it is safe to interrupt this process any time before data migration completes)
366 (it is safe to interrupt this process any time before data migration completes)
367 data fully migrated to temporary repository
367 data fully migrated to temporary repository
368 marking source repository as being upgraded; clients will be unable to read from repository
368 marking source repository as being upgraded; clients will be unable to read from repository
369 starting in-place swap of repository data
369 starting in-place swap of repository data
370 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
370 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
371 replacing store...
371 replacing store...
372 store replacement complete; repository was inconsistent for *s (glob)
372 store replacement complete; repository was inconsistent for *s (glob)
373 finalizing requirements file and making repository readable again
373 finalizing requirements file and making repository readable again
374 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
374 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
375 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
375 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
376 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
376 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
377
377
378 Upgrading a repository to generaldelta works
378 Upgrading a repository to generaldelta works
379
379
380 $ hg --config format.usegeneraldelta=false init upgradegd
380 $ hg --config format.usegeneraldelta=false init upgradegd
381 $ cd upgradegd
381 $ cd upgradegd
382 $ touch f0
382 $ touch f0
383 $ hg -q commit -A -m initial
383 $ hg -q commit -A -m initial
384 $ touch f1
384 $ touch f1
385 $ hg -q commit -A -m 'add f1'
385 $ hg -q commit -A -m 'add f1'
386 $ hg -q up -r 0
386 $ hg -q up -r 0
387 $ touch f2
387 $ touch f2
388 $ hg -q commit -A -m 'add f2'
388 $ hg -q commit -A -m 'add f2'
389
389
390 $ hg debugupgraderepo --run --config format.sparse-revlog=false
390 $ hg debugupgraderepo --run --config format.sparse-revlog=false
391 upgrade will perform the following actions:
391 upgrade will perform the following actions:
392
392
393 requirements
393 requirements
394 preserved: dotencode, fncache, revlogv1, store
394 preserved: dotencode, fncache, revlogv1, store
395 added: generaldelta
395 added: generaldelta
396
396
397 generaldelta
397 generaldelta
398 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
398 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
399
399
400 beginning upgrade...
400 beginning upgrade...
401 repository locked and read-only
401 repository locked and read-only
402 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
402 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
403 (it is safe to interrupt this process any time before data migration completes)
403 (it is safe to interrupt this process any time before data migration completes)
404 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
404 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
405 migrating 917 bytes in store; 401 bytes tracked data
405 migrating 917 bytes in store; 401 bytes tracked data
406 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
406 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
407 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
407 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
408 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
408 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
409 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
409 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
410 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
410 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
411 finished migrating 3 changelog revisions; change in size: 0 bytes
411 finished migrating 3 changelog revisions; change in size: 0 bytes
412 finished migrating 9 total revisions; total change in store size: 0 bytes
412 finished migrating 9 total revisions; total change in store size: 0 bytes
413 copying phaseroots
413 copying phaseroots
414 data fully migrated to temporary repository
414 data fully migrated to temporary repository
415 marking source repository as being upgraded; clients will be unable to read from repository
415 marking source repository as being upgraded; clients will be unable to read from repository
416 starting in-place swap of repository data
416 starting in-place swap of repository data
417 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
417 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
418 replacing store...
418 replacing store...
419 store replacement complete; repository was inconsistent for *s (glob)
419 store replacement complete; repository was inconsistent for *s (glob)
420 finalizing requirements file and making repository readable again
420 finalizing requirements file and making repository readable again
421 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
421 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
422 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
422 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
423 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
423 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
424
424
425 Original requirements backed up
425 Original requirements backed up
426
426
427 $ cat .hg/upgradebackup.*/requires
427 $ cat .hg/upgradebackup.*/requires
428 dotencode
428 dotencode
429 fncache
429 fncache
430 revlogv1
430 revlogv1
431 store
431 store
432
432
433 generaldelta added to original requirements files
433 generaldelta added to original requirements files
434
434
435 $ cat .hg/requires
435 $ cat .hg/requires
436 dotencode
436 dotencode
437 fncache
437 fncache
438 generaldelta
438 generaldelta
439 revlogv1
439 revlogv1
440 store
440 store
441
441
442 store directory has files we expect
442 store directory has files we expect
443
443
444 $ ls .hg/store
444 $ ls .hg/store
445 00changelog.i
445 00changelog.i
446 00manifest.i
446 00manifest.i
447 data
447 data
448 fncache
448 fncache
449 phaseroots
449 phaseroots
450 undo
450 undo
451 undo.backupfiles
451 undo.backupfiles
452 undo.phaseroots
452 undo.phaseroots
453
453
454 manifest should be generaldelta
454 manifest should be generaldelta
455
455
456 $ hg debugrevlog -m | grep flags
456 $ hg debugrevlog -m | grep flags
457 flags : inline, generaldelta
457 flags : inline, generaldelta
458
458
459 verify should be happy
459 verify should be happy
460
460
461 $ hg verify
461 $ hg verify
462 checking changesets
462 checking changesets
463 checking manifests
463 checking manifests
464 crosschecking files in changesets and manifests
464 crosschecking files in changesets and manifests
465 checking files
465 checking files
466 checked 3 changesets with 3 changes to 3 files
466 checked 3 changesets with 3 changes to 3 files
467
467
468 old store should be backed up
468 old store should be backed up
469
469
470 $ ls -d .hg/upgradebackup.*/
470 $ ls -d .hg/upgradebackup.*/
471 .hg/upgradebackup.*/ (glob)
471 .hg/upgradebackup.*/ (glob)
472 $ ls .hg/upgradebackup.*/store
472 $ ls .hg/upgradebackup.*/store
473 00changelog.i
473 00changelog.i
474 00manifest.i
474 00manifest.i
475 data
475 data
476 fncache
476 fncache
477 phaseroots
477 phaseroots
478 undo
478 undo
479 undo.backup.fncache
479 undo.backup.fncache
480 undo.backupfiles
480 undo.backupfiles
481 undo.phaseroots
481 undo.phaseroots
482
482
483 unless --no-backup is passed
483 unless --no-backup is passed
484
484
485 $ rm -rf .hg/upgradebackup.*/
485 $ rm -rf .hg/upgradebackup.*/
486 $ hg debugupgraderepo --run --no-backup
486 $ hg debugupgraderepo --run --no-backup
487 upgrade will perform the following actions:
487 upgrade will perform the following actions:
488
488
489 requirements
489 requirements
490 preserved: dotencode, fncache, generaldelta, revlogv1, store
490 preserved: dotencode, fncache, generaldelta, revlogv1, store
491 added: sparserevlog
491 added: sparserevlog
492
492
493 sparserevlog
493 sparserevlog
494 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
494 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
495
495
496 beginning upgrade...
496 beginning upgrade...
497 repository locked and read-only
497 repository locked and read-only
498 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
498 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
499 (it is safe to interrupt this process any time before data migration completes)
499 (it is safe to interrupt this process any time before data migration completes)
500 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
500 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
501 migrating 917 bytes in store; 401 bytes tracked data
501 migrating 917 bytes in store; 401 bytes tracked data
502 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
502 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
503 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
503 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
504 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
504 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
505 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
505 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
506 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
506 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
507 finished migrating 3 changelog revisions; change in size: 0 bytes
507 finished migrating 3 changelog revisions; change in size: 0 bytes
508 finished migrating 9 total revisions; total change in store size: 0 bytes
508 finished migrating 9 total revisions; total change in store size: 0 bytes
509 copying phaseroots
509 copying phaseroots
510 data fully migrated to temporary repository
510 data fully migrated to temporary repository
511 marking source repository as being upgraded; clients will be unable to read from repository
511 marking source repository as being upgraded; clients will be unable to read from repository
512 starting in-place swap of repository data
512 starting in-place swap of repository data
513 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
513 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
514 replacing store...
514 replacing store...
515 store replacement complete; repository was inconsistent for * (glob)
515 store replacement complete; repository was inconsistent for * (glob)
516 finalizing requirements file and making repository readable again
516 finalizing requirements file and making repository readable again
517 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
517 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
518 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
518 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
519 $ ls -1 .hg/ | grep upgradebackup
519 $ ls -1 .hg/ | grep upgradebackup
520 [1]
520 [1]
521
521
522 We can restrict optimization to some revlog:
522 We can restrict optimization to some revlog:
523
523
524 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
524 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
525 upgrade will perform the following actions:
525 upgrade will perform the following actions:
526
526
527 requirements
527 requirements
528 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
528 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
529
529
530 re-delta-parent
530 re-delta-parent
531 deltas within internal storage will choose a new base revision if needed
531 deltas within internal storage will choose a new base revision if needed
532
532
533 beginning upgrade...
533 beginning upgrade...
534 repository locked and read-only
534 repository locked and read-only
535 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
535 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
536 (it is safe to interrupt this process any time before data migration completes)
536 (it is safe to interrupt this process any time before data migration completes)
537 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
537 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
538 migrating 917 bytes in store; 401 bytes tracked data
538 migrating 917 bytes in store; 401 bytes tracked data
539 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
539 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
540 blindly copying data/f0.i containing 1 revisions
540 blindly copying data/f0.i containing 1 revisions
541 blindly copying data/f1.i containing 1 revisions
541 blindly copying data/f1.i containing 1 revisions
542 blindly copying data/f2.i containing 1 revisions
542 blindly copying data/f2.i containing 1 revisions
543 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
543 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
544 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
544 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
545 cloning 3 revisions from 00manifest.i
545 cloning 3 revisions from 00manifest.i
546 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
546 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
547 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
547 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
548 blindly copying 00changelog.i containing 3 revisions
548 blindly copying 00changelog.i containing 3 revisions
549 finished migrating 3 changelog revisions; change in size: 0 bytes
549 finished migrating 3 changelog revisions; change in size: 0 bytes
550 finished migrating 9 total revisions; total change in store size: 0 bytes
550 finished migrating 9 total revisions; total change in store size: 0 bytes
551 copying phaseroots
551 copying phaseroots
552 data fully migrated to temporary repository
552 data fully migrated to temporary repository
553 marking source repository as being upgraded; clients will be unable to read from repository
553 marking source repository as being upgraded; clients will be unable to read from repository
554 starting in-place swap of repository data
554 starting in-place swap of repository data
555 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
555 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
556 replacing store...
556 replacing store...
557 store replacement complete; repository was inconsistent for *s (glob)
557 store replacement complete; repository was inconsistent for *s (glob)
558 finalizing requirements file and making repository readable again
558 finalizing requirements file and making repository readable again
559 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
559 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
560 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
560 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
561
561
562 Check that the repo still works fine
562 Check that the repo still works fine
563
563
564 $ hg log -G --patch
564 $ hg log -G --patch
565 @ changeset: 2:b5a3b78015e5
565 @ changeset: 2:b5a3b78015e5
566 | tag: tip
566 | tag: tip
567 | parent: 0:ba592bf28da2
567 | parent: 0:ba592bf28da2
568 | user: test
568 | user: test
569 | date: Thu Jan 01 00:00:00 1970 +0000
569 | date: Thu Jan 01 00:00:00 1970 +0000
570 | summary: add f2
570 | summary: add f2
571 |
571 |
572 |
572 |
573 | o changeset: 1:da8c0fc4833c
573 | o changeset: 1:da8c0fc4833c
574 |/ user: test
574 |/ user: test
575 | date: Thu Jan 01 00:00:00 1970 +0000
575 | date: Thu Jan 01 00:00:00 1970 +0000
576 | summary: add f1
576 | summary: add f1
577 |
577 |
578 |
578 |
579 o changeset: 0:ba592bf28da2
579 o changeset: 0:ba592bf28da2
580 user: test
580 user: test
581 date: Thu Jan 01 00:00:00 1970 +0000
581 date: Thu Jan 01 00:00:00 1970 +0000
582 summary: initial
582 summary: initial
583
583
584
584
585
585
586 $ hg verify
586 $ hg verify
587 checking changesets
587 checking changesets
588 checking manifests
588 checking manifests
589 crosschecking files in changesets and manifests
589 crosschecking files in changesets and manifests
590 checking files
590 checking files
591 checked 3 changesets with 3 changes to 3 files
591 checked 3 changesets with 3 changes to 3 files
592
592
593 Check we can select negatively
593 Check we can select negatively
594
594
595 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
595 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
596 upgrade will perform the following actions:
596 upgrade will perform the following actions:
597
597
598 requirements
598 requirements
599 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
599 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
600
600
601 re-delta-parent
601 re-delta-parent
602 deltas within internal storage will choose a new base revision if needed
602 deltas within internal storage will choose a new base revision if needed
603
603
604 beginning upgrade...
604 beginning upgrade...
605 repository locked and read-only
605 repository locked and read-only
606 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
606 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
607 (it is safe to interrupt this process any time before data migration completes)
607 (it is safe to interrupt this process any time before data migration completes)
608 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
608 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
609 migrating 917 bytes in store; 401 bytes tracked data
609 migrating 917 bytes in store; 401 bytes tracked data
610 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
610 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
611 cloning 1 revisions from data/f0.i
611 cloning 1 revisions from data/f0.i
612 cloning 1 revisions from data/f1.i
612 cloning 1 revisions from data/f1.i
613 cloning 1 revisions from data/f2.i
613 cloning 1 revisions from data/f2.i
614 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
614 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
615 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
615 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
616 blindly copying 00manifest.i containing 3 revisions
616 blindly copying 00manifest.i containing 3 revisions
617 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
617 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
618 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
618 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
619 cloning 3 revisions from 00changelog.i
619 cloning 3 revisions from 00changelog.i
620 finished migrating 3 changelog revisions; change in size: 0 bytes
620 finished migrating 3 changelog revisions; change in size: 0 bytes
621 finished migrating 9 total revisions; total change in store size: 0 bytes
621 finished migrating 9 total revisions; total change in store size: 0 bytes
622 copying phaseroots
622 copying phaseroots
623 data fully migrated to temporary repository
623 data fully migrated to temporary repository
624 marking source repository as being upgraded; clients will be unable to read from repository
624 marking source repository as being upgraded; clients will be unable to read from repository
625 starting in-place swap of repository data
625 starting in-place swap of repository data
626 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
626 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
627 replacing store...
627 replacing store...
628 store replacement complete; repository was inconsistent for *s (glob)
628 store replacement complete; repository was inconsistent for *s (glob)
629 finalizing requirements file and making repository readable again
629 finalizing requirements file and making repository readable again
630 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
630 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
631 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
631 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
632 $ hg verify
632 $ hg verify
633 checking changesets
633 checking changesets
634 checking manifests
634 checking manifests
635 crosschecking files in changesets and manifests
635 crosschecking files in changesets and manifests
636 checking files
636 checking files
637 checked 3 changesets with 3 changes to 3 files
637 checked 3 changesets with 3 changes to 3 files
638
638
639 Check that we can select changelog only
640
641 $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
642 upgrade will perform the following actions:
643
644 requirements
645 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
646
647 re-delta-parent
648 deltas within internal storage will choose a new base revision if needed
649
650 beginning upgrade...
651 repository locked and read-only
652 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
653 (it is safe to interrupt this process any time before data migration completes)
654 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
655 migrating 917 bytes in store; 401 bytes tracked data
656 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
657 blindly copying data/f0.i containing 1 revisions
658 blindly copying data/f1.i containing 1 revisions
659 blindly copying data/f2.i containing 1 revisions
660 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
661 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
662 blindly copying 00manifest.i containing 3 revisions
663 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
664 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
665 cloning 3 revisions from 00changelog.i
666 finished migrating 3 changelog revisions; change in size: 0 bytes
667 finished migrating 9 total revisions; total change in store size: 0 bytes
668 copying phaseroots
669 data fully migrated to temporary repository
670 marking source repository as being upgraded; clients will be unable to read from repository
671 starting in-place swap of repository data
672 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
673 replacing store...
674 store replacement complete; repository was inconsistent for *s (glob)
675 finalizing requirements file and making repository readable again
676 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
677 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
678 $ hg verify
679 checking changesets
680 checking manifests
681 crosschecking files in changesets and manifests
682 checking files
683 checked 3 changesets with 3 changes to 3 files
684
685 Check that we can select filelog only
686
687 $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
688 upgrade will perform the following actions:
689
690 requirements
691 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
692
693 re-delta-parent
694 deltas within internal storage will choose a new base revision if needed
695
696 beginning upgrade...
697 repository locked and read-only
698 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
699 (it is safe to interrupt this process any time before data migration completes)
700 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
701 migrating 917 bytes in store; 401 bytes tracked data
702 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
703 cloning 1 revisions from data/f0.i
704 cloning 1 revisions from data/f1.i
705 cloning 1 revisions from data/f2.i
706 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
707 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
708 blindly copying 00manifest.i containing 3 revisions
709 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
710 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
711 blindly copying 00changelog.i containing 3 revisions
712 finished migrating 3 changelog revisions; change in size: 0 bytes
713 finished migrating 9 total revisions; total change in store size: 0 bytes
714 copying phaseroots
715 data fully migrated to temporary repository
716 marking source repository as being upgraded; clients will be unable to read from repository
717 starting in-place swap of repository data
718 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
719 replacing store...
720 store replacement complete; repository was inconsistent for *s (glob)
721 finalizing requirements file and making repository readable again
722 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
723 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
724 $ hg verify
725 checking changesets
726 checking manifests
727 crosschecking files in changesets and manifests
728 checking files
729 checked 3 changesets with 3 changes to 3 files
730
639 $ cd ..
731 $ cd ..
640
732
641 store files with special filenames aren't encoded during copy
733 store files with special filenames aren't encoded during copy
642
734
643 $ hg init store-filenames
735 $ hg init store-filenames
644 $ cd store-filenames
736 $ cd store-filenames
645 $ touch foo
737 $ touch foo
646 $ hg -q commit -A -m initial
738 $ hg -q commit -A -m initial
647 $ touch .hg/store/.XX_special_filename
739 $ touch .hg/store/.XX_special_filename
648
740
649 $ hg debugupgraderepo --run
741 $ hg debugupgraderepo --run
650 upgrade will perform the following actions:
742 upgrade will perform the following actions:
651
743
652 requirements
744 requirements
653 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
745 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
654
746
655 beginning upgrade...
747 beginning upgrade...
656 repository locked and read-only
748 repository locked and read-only
657 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
749 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
658 (it is safe to interrupt this process any time before data migration completes)
750 (it is safe to interrupt this process any time before data migration completes)
659 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
751 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
660 migrating 301 bytes in store; 107 bytes tracked data
752 migrating 301 bytes in store; 107 bytes tracked data
661 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
753 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
662 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
754 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
663 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
755 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
664 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
756 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
665 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
757 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
666 finished migrating 1 changelog revisions; change in size: 0 bytes
758 finished migrating 1 changelog revisions; change in size: 0 bytes
667 finished migrating 3 total revisions; total change in store size: 0 bytes
759 finished migrating 3 total revisions; total change in store size: 0 bytes
668 copying .XX_special_filename
760 copying .XX_special_filename
669 copying phaseroots
761 copying phaseroots
670 data fully migrated to temporary repository
762 data fully migrated to temporary repository
671 marking source repository as being upgraded; clients will be unable to read from repository
763 marking source repository as being upgraded; clients will be unable to read from repository
672 starting in-place swap of repository data
764 starting in-place swap of repository data
673 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
765 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
674 replacing store...
766 replacing store...
675 store replacement complete; repository was inconsistent for *s (glob)
767 store replacement complete; repository was inconsistent for *s (glob)
676 finalizing requirements file and making repository readable again
768 finalizing requirements file and making repository readable again
677 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
769 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
678 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
770 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
679 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
771 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
680 $ hg debugupgraderepo --run --optimize redeltafulladd
772 $ hg debugupgraderepo --run --optimize redeltafulladd
681 upgrade will perform the following actions:
773 upgrade will perform the following actions:
682
774
683 requirements
775 requirements
684 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
776 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
685
777
686 re-delta-fulladd
778 re-delta-fulladd
687 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
779 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
688
780
689 beginning upgrade...
781 beginning upgrade...
690 repository locked and read-only
782 repository locked and read-only
691 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
783 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
692 (it is safe to interrupt this process any time before data migration completes)
784 (it is safe to interrupt this process any time before data migration completes)
693 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
785 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
694 migrating 301 bytes in store; 107 bytes tracked data
786 migrating 301 bytes in store; 107 bytes tracked data
695 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
787 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
696 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
788 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
697 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
789 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
698 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
790 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
699 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
791 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
700 finished migrating 1 changelog revisions; change in size: 0 bytes
792 finished migrating 1 changelog revisions; change in size: 0 bytes
701 finished migrating 3 total revisions; total change in store size: 0 bytes
793 finished migrating 3 total revisions; total change in store size: 0 bytes
702 copying .XX_special_filename
794 copying .XX_special_filename
703 copying phaseroots
795 copying phaseroots
704 data fully migrated to temporary repository
796 data fully migrated to temporary repository
705 marking source repository as being upgraded; clients will be unable to read from repository
797 marking source repository as being upgraded; clients will be unable to read from repository
706 starting in-place swap of repository data
798 starting in-place swap of repository data
707 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
799 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
708 replacing store...
800 replacing store...
709 store replacement complete; repository was inconsistent for *s (glob)
801 store replacement complete; repository was inconsistent for *s (glob)
710 finalizing requirements file and making repository readable again
802 finalizing requirements file and making repository readable again
711 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
803 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
712 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
804 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
713 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
805 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
714
806
715 fncache is valid after upgrade
807 fncache is valid after upgrade
716
808
717 $ hg debugrebuildfncache
809 $ hg debugrebuildfncache
718 fncache already up to date
810 fncache already up to date
719
811
720 $ cd ..
812 $ cd ..
721
813
722 Check upgrading a large file repository
814 Check upgrading a large file repository
723 ---------------------------------------
815 ---------------------------------------
724
816
725 $ hg init largefilesrepo
817 $ hg init largefilesrepo
726 $ cat << EOF >> largefilesrepo/.hg/hgrc
818 $ cat << EOF >> largefilesrepo/.hg/hgrc
727 > [extensions]
819 > [extensions]
728 > largefiles =
820 > largefiles =
729 > EOF
821 > EOF
730
822
731 $ cd largefilesrepo
823 $ cd largefilesrepo
732 $ touch foo
824 $ touch foo
733 $ hg add --large foo
825 $ hg add --large foo
734 $ hg -q commit -m initial
826 $ hg -q commit -m initial
735 $ cat .hg/requires
827 $ cat .hg/requires
736 dotencode
828 dotencode
737 fncache
829 fncache
738 generaldelta
830 generaldelta
739 largefiles
831 largefiles
740 revlogv1
832 revlogv1
741 sparserevlog
833 sparserevlog
742 store
834 store
743
835
744 $ hg debugupgraderepo --run
836 $ hg debugupgraderepo --run
745 upgrade will perform the following actions:
837 upgrade will perform the following actions:
746
838
747 requirements
839 requirements
748 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
840 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
749
841
750 beginning upgrade...
842 beginning upgrade...
751 repository locked and read-only
843 repository locked and read-only
752 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
844 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
753 (it is safe to interrupt this process any time before data migration completes)
845 (it is safe to interrupt this process any time before data migration completes)
754 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
846 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
755 migrating 355 bytes in store; 160 bytes tracked data
847 migrating 355 bytes in store; 160 bytes tracked data
756 migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
848 migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
757 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
849 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
758 migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
850 migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
759 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
851 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
760 migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
852 migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
761 finished migrating 1 changelog revisions; change in size: 0 bytes
853 finished migrating 1 changelog revisions; change in size: 0 bytes
762 finished migrating 3 total revisions; total change in store size: 0 bytes
854 finished migrating 3 total revisions; total change in store size: 0 bytes
763 copying phaseroots
855 copying phaseroots
764 data fully migrated to temporary repository
856 data fully migrated to temporary repository
765 marking source repository as being upgraded; clients will be unable to read from repository
857 marking source repository as being upgraded; clients will be unable to read from repository
766 starting in-place swap of repository data
858 starting in-place swap of repository data
767 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
859 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
768 replacing store...
860 replacing store...
769 store replacement complete; repository was inconsistent for *s (glob)
861 store replacement complete; repository was inconsistent for *s (glob)
770 finalizing requirements file and making repository readable again
862 finalizing requirements file and making repository readable again
771 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
863 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
772 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
864 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
773 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
865 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
774 $ cat .hg/requires
866 $ cat .hg/requires
775 dotencode
867 dotencode
776 fncache
868 fncache
777 generaldelta
869 generaldelta
778 largefiles
870 largefiles
779 revlogv1
871 revlogv1
780 sparserevlog
872 sparserevlog
781 store
873 store
782
874
783 $ cat << EOF >> .hg/hgrc
875 $ cat << EOF >> .hg/hgrc
784 > [extensions]
876 > [extensions]
785 > lfs =
877 > lfs =
786 > [lfs]
878 > [lfs]
787 > threshold = 10
879 > threshold = 10
788 > EOF
880 > EOF
789 $ echo '123456789012345' > lfs.bin
881 $ echo '123456789012345' > lfs.bin
790 $ hg ci -Am 'lfs.bin'
882 $ hg ci -Am 'lfs.bin'
791 adding lfs.bin
883 adding lfs.bin
792 $ grep lfs .hg/requires
884 $ grep lfs .hg/requires
793 lfs
885 lfs
794 $ find .hg/store/lfs -type f
886 $ find .hg/store/lfs -type f
795 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
887 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
796
888
797 $ hg debugupgraderepo --run
889 $ hg debugupgraderepo --run
798 upgrade will perform the following actions:
890 upgrade will perform the following actions:
799
891
800 requirements
892 requirements
801 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
893 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
802
894
803 beginning upgrade...
895 beginning upgrade...
804 repository locked and read-only
896 repository locked and read-only
805 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
897 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
806 (it is safe to interrupt this process any time before data migration completes)
898 (it is safe to interrupt this process any time before data migration completes)
807 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
899 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
808 migrating 801 bytes in store; 467 bytes tracked data
900 migrating 801 bytes in store; 467 bytes tracked data
809 migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
901 migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
810 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
902 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
811 migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
903 migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
812 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
904 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
813 migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
905 migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
814 finished migrating 2 changelog revisions; change in size: 0 bytes
906 finished migrating 2 changelog revisions; change in size: 0 bytes
815 finished migrating 6 total revisions; total change in store size: 0 bytes
907 finished migrating 6 total revisions; total change in store size: 0 bytes
816 copying phaseroots
908 copying phaseroots
817 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
909 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
818 data fully migrated to temporary repository
910 data fully migrated to temporary repository
819 marking source repository as being upgraded; clients will be unable to read from repository
911 marking source repository as being upgraded; clients will be unable to read from repository
820 starting in-place swap of repository data
912 starting in-place swap of repository data
821 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
913 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
822 replacing store...
914 replacing store...
823 store replacement complete; repository was inconsistent for *s (glob)
915 store replacement complete; repository was inconsistent for *s (glob)
824 finalizing requirements file and making repository readable again
916 finalizing requirements file and making repository readable again
825 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
917 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
826 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
918 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
827 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
919 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
828
920
829 $ grep lfs .hg/requires
921 $ grep lfs .hg/requires
830 lfs
922 lfs
831 $ find .hg/store/lfs -type f
923 $ find .hg/store/lfs -type f
832 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
924 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
833 $ hg verify
925 $ hg verify
834 checking changesets
926 checking changesets
835 checking manifests
927 checking manifests
836 crosschecking files in changesets and manifests
928 crosschecking files in changesets and manifests
837 checking files
929 checking files
838 checked 2 changesets with 2 changes to 2 files
930 checked 2 changesets with 2 changes to 2 files
839 $ hg debugdata lfs.bin 0
931 $ hg debugdata lfs.bin 0
840 version https://git-lfs.github.com/spec/v1
932 version https://git-lfs.github.com/spec/v1
841 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
933 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
842 size 16
934 size 16
843 x-is-binary 0
935 x-is-binary 0
844
936
845 $ cd ..
937 $ cd ..
846
938
847 repository config is taken in account
939 repository config is taken in account
848 -------------------------------------
940 -------------------------------------
849
941
850 $ cat << EOF >> $HGRCPATH
942 $ cat << EOF >> $HGRCPATH
851 > [format]
943 > [format]
852 > maxchainlen = 1
944 > maxchainlen = 1
853 > EOF
945 > EOF
854
946
855 $ hg init localconfig
947 $ hg init localconfig
856 $ cd localconfig
948 $ cd localconfig
857 $ cat << EOF > file
949 $ cat << EOF > file
858 > some content
950 > some content
859 > with some length
951 > with some length
860 > to make sure we get a delta
952 > to make sure we get a delta
861 > after changes
953 > after changes
862 > very long
954 > very long
863 > very long
955 > very long
864 > very long
956 > very long
865 > very long
957 > very long
866 > very long
958 > very long
867 > very long
959 > very long
868 > very long
960 > very long
869 > very long
961 > very long
870 > very long
962 > very long
871 > very long
963 > very long
872 > very long
964 > very long
873 > EOF
965 > EOF
874 $ hg -q commit -A -m A
966 $ hg -q commit -A -m A
875 $ echo "new line" >> file
967 $ echo "new line" >> file
876 $ hg -q commit -m B
968 $ hg -q commit -m B
877 $ echo "new line" >> file
969 $ echo "new line" >> file
878 $ hg -q commit -m C
970 $ hg -q commit -m C
879
971
880 $ cat << EOF >> .hg/hgrc
972 $ cat << EOF >> .hg/hgrc
881 > [format]
973 > [format]
882 > maxchainlen = 9001
974 > maxchainlen = 9001
883 > EOF
975 > EOF
884 $ hg config format
976 $ hg config format
885 format.maxchainlen=9001
977 format.maxchainlen=9001
886 $ hg debugdeltachain file
978 $ hg debugdeltachain file
887 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
979 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
888 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
980 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
889 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
981 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
890 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
982 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
891
983
892 $ hg debugupgraderepo --run --optimize redeltaall
984 $ hg debugupgraderepo --run --optimize redeltaall
893 upgrade will perform the following actions:
985 upgrade will perform the following actions:
894
986
895 requirements
987 requirements
896 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
988 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
897
989
898 re-delta-all
990 re-delta-all
899 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
991 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
900
992
901 beginning upgrade...
993 beginning upgrade...
902 repository locked and read-only
994 repository locked and read-only
903 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
995 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
904 (it is safe to interrupt this process any time before data migration completes)
996 (it is safe to interrupt this process any time before data migration completes)
905 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
997 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
906 migrating 1019 bytes in store; 882 bytes tracked data
998 migrating 1019 bytes in store; 882 bytes tracked data
907 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
999 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
908 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
1000 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
909 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
1001 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
910 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1002 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
911 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
1003 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
912 finished migrating 3 changelog revisions; change in size: 0 bytes
1004 finished migrating 3 changelog revisions; change in size: 0 bytes
913 finished migrating 9 total revisions; total change in store size: -9 bytes
1005 finished migrating 9 total revisions; total change in store size: -9 bytes
914 copying phaseroots
1006 copying phaseroots
915 data fully migrated to temporary repository
1007 data fully migrated to temporary repository
916 marking source repository as being upgraded; clients will be unable to read from repository
1008 marking source repository as being upgraded; clients will be unable to read from repository
917 starting in-place swap of repository data
1009 starting in-place swap of repository data
918 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1010 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
919 replacing store...
1011 replacing store...
920 store replacement complete; repository was inconsistent for *s (glob)
1012 store replacement complete; repository was inconsistent for *s (glob)
921 finalizing requirements file and making repository readable again
1013 finalizing requirements file and making repository readable again
922 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
1014 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
923 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1015 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
924 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1016 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
925 $ hg debugdeltachain file
1017 $ hg debugdeltachain file
926 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1018 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
927 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1019 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
928 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1020 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
929 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
1021 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
930 $ cd ..
1022 $ cd ..
931
1023
932 $ cat << EOF >> $HGRCPATH
1024 $ cat << EOF >> $HGRCPATH
933 > [format]
1025 > [format]
934 > maxchainlen = 9001
1026 > maxchainlen = 9001
935 > EOF
1027 > EOF
936
1028
937 Check upgrading a sparse-revlog repository
1029 Check upgrading a sparse-revlog repository
938 ---------------------------------------
1030 ---------------------------------------
939
1031
940 $ hg init sparserevlogrepo --config format.sparse-revlog=no
1032 $ hg init sparserevlogrepo --config format.sparse-revlog=no
941 $ cd sparserevlogrepo
1033 $ cd sparserevlogrepo
942 $ touch foo
1034 $ touch foo
943 $ hg add foo
1035 $ hg add foo
944 $ hg -q commit -m "foo"
1036 $ hg -q commit -m "foo"
945 $ cat .hg/requires
1037 $ cat .hg/requires
946 dotencode
1038 dotencode
947 fncache
1039 fncache
948 generaldelta
1040 generaldelta
949 revlogv1
1041 revlogv1
950 store
1042 store
951
1043
952 Check that we can add the sparse-revlog format requirement
1044 Check that we can add the sparse-revlog format requirement
953 $ hg --config format.sparse-revlog=yes debugupgraderepo --run >/dev/null
1045 $ hg --config format.sparse-revlog=yes debugupgraderepo --run >/dev/null
954 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1046 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
955 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1047 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
956 $ cat .hg/requires
1048 $ cat .hg/requires
957 dotencode
1049 dotencode
958 fncache
1050 fncache
959 generaldelta
1051 generaldelta
960 revlogv1
1052 revlogv1
961 sparserevlog
1053 sparserevlog
962 store
1054 store
963
1055
964 Check that we can remove the sparse-revlog format requirement
1056 Check that we can remove the sparse-revlog format requirement
965 $ hg --config format.sparse-revlog=no debugupgraderepo --run >/dev/null
1057 $ hg --config format.sparse-revlog=no debugupgraderepo --run >/dev/null
966 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1058 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
967 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1059 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
968 $ cat .hg/requires
1060 $ cat .hg/requires
969 dotencode
1061 dotencode
970 fncache
1062 fncache
971 generaldelta
1063 generaldelta
972 revlogv1
1064 revlogv1
973 store
1065 store
974
1066
975 #if zstd
1067 #if zstd
976
1068
977 Check upgrading to a zstd revlog
1069 Check upgrading to a zstd revlog
978 --------------------------------
1070 --------------------------------
979
1071
980 upgrade
1072 upgrade
981
1073
982 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup >/dev/null
1074 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup >/dev/null
983 $ hg debugformat -v
1075 $ hg debugformat -v
984 format-variant repo config default
1076 format-variant repo config default
985 fncache: yes yes yes
1077 fncache: yes yes yes
986 dotencode: yes yes yes
1078 dotencode: yes yes yes
987 generaldelta: yes yes yes
1079 generaldelta: yes yes yes
988 sparserevlog: yes yes yes
1080 sparserevlog: yes yes yes
989 plain-cl-delta: yes yes yes
1081 plain-cl-delta: yes yes yes
990 compression: zstd zlib zlib
1082 compression: zstd zlib zlib
991 compression-level: default default default
1083 compression-level: default default default
992 $ cat .hg/requires
1084 $ cat .hg/requires
993 dotencode
1085 dotencode
994 fncache
1086 fncache
995 generaldelta
1087 generaldelta
996 revlog-compression-zstd
1088 revlog-compression-zstd
997 revlogv1
1089 revlogv1
998 sparserevlog
1090 sparserevlog
999 store
1091 store
1000
1092
1001 downgrade
1093 downgrade
1002
1094
1003 $ hg debugupgraderepo --run --no-backup > /dev/null
1095 $ hg debugupgraderepo --run --no-backup > /dev/null
1004 $ hg debugformat -v
1096 $ hg debugformat -v
1005 format-variant repo config default
1097 format-variant repo config default
1006 fncache: yes yes yes
1098 fncache: yes yes yes
1007 dotencode: yes yes yes
1099 dotencode: yes yes yes
1008 generaldelta: yes yes yes
1100 generaldelta: yes yes yes
1009 sparserevlog: yes yes yes
1101 sparserevlog: yes yes yes
1010 plain-cl-delta: yes yes yes
1102 plain-cl-delta: yes yes yes
1011 compression: zlib zlib zlib
1103 compression: zlib zlib zlib
1012 compression-level: default default default
1104 compression-level: default default default
1013 $ cat .hg/requires
1105 $ cat .hg/requires
1014 dotencode
1106 dotencode
1015 fncache
1107 fncache
1016 generaldelta
1108 generaldelta
1017 revlogv1
1109 revlogv1
1018 sparserevlog
1110 sparserevlog
1019 store
1111 store
1020
1112
1021 upgrade from hgrc
1113 upgrade from hgrc
1022
1114
1023 $ cat >> .hg/hgrc << EOF
1115 $ cat >> .hg/hgrc << EOF
1024 > [format]
1116 > [format]
1025 > revlog-compression=zstd
1117 > revlog-compression=zstd
1026 > EOF
1118 > EOF
1027 $ hg debugupgraderepo --run --no-backup > /dev/null
1119 $ hg debugupgraderepo --run --no-backup > /dev/null
1028 $ hg debugformat -v
1120 $ hg debugformat -v
1029 format-variant repo config default
1121 format-variant repo config default
1030 fncache: yes yes yes
1122 fncache: yes yes yes
1031 dotencode: yes yes yes
1123 dotencode: yes yes yes
1032 generaldelta: yes yes yes
1124 generaldelta: yes yes yes
1033 sparserevlog: yes yes yes
1125 sparserevlog: yes yes yes
1034 plain-cl-delta: yes yes yes
1126 plain-cl-delta: yes yes yes
1035 compression: zstd zstd zlib
1127 compression: zstd zstd zlib
1036 compression-level: default default default
1128 compression-level: default default default
1037 $ cat .hg/requires
1129 $ cat .hg/requires
1038 dotencode
1130 dotencode
1039 fncache
1131 fncache
1040 generaldelta
1132 generaldelta
1041 revlog-compression-zstd
1133 revlog-compression-zstd
1042 revlogv1
1134 revlogv1
1043 sparserevlog
1135 sparserevlog
1044 store
1136 store
1045
1137
1046 $ cd ..
1138 $ cd ..
1047
1139
1048 #endif
1140 #endif
General Comments 0
You need to be logged in to leave comments. Login now