upgrade: extract code in its own module...
Pierre-Yves David
r31864:70d163b8 default
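This commit moves the repository-upgrade machinery out of debugcommands.py and into a new mercurial/upgrade.py module. The only edit visible in the hunk below is the new `upgrade` entry in the module's relative-import block; the `debugupgraderepo` command further down in the file (outside the lines shown here) becomes a thin wrapper around the extracted code. As a minimal sketch of what that wrapper looks like after the extraction, assuming the new module exposes an `upgraderepo(ui, repo, run=False, optimize=None)` entry point as this series introduces:

    # Sketch only -- the real decorator table and docstring sit past the
    # end of this hunk; option names here follow the pre-existing command.
    @command('debugupgraderepo', [
        ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
        ('', 'run', False, _('performs an upgrade')),
    ])
    def debugupgraderepo(ui, repo, run=False, **opts):
        """upgrade a repository to use different features"""
        # the planning and execution logic now lives in the upgrade module
        return upgrade.upgraderepo(ui, repo, run=run,
                                   optimize=opts['optimize'])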
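From the command line nothing should change for users: `hg debugupgraderepo` still reports the planned upgrade as a dry run and `hg debugupgraderepo --run` performs it; only the implementation's home moved. Apart from the one added import, the hunk below is pure context.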
@@ -1,2115 +1,2116 @@
1 # debugcommands.py - command processing for debug* commands
1 # debugcommands.py - command processing for debug* commands
2 #
2 #
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import difflib
10 import difflib
11 import errno
11 import errno
12 import operator
12 import operator
13 import os
13 import os
14 import random
14 import random
15 import socket
15 import socket
16 import string
16 import string
17 import sys
17 import sys
18 import tempfile
18 import tempfile
19 import time
19 import time
20
20
21 from .i18n import _
21 from .i18n import _
22 from .node import (
22 from .node import (
23 bin,
23 bin,
24 hex,
24 hex,
25 nullhex,
25 nullhex,
26 nullid,
26 nullid,
27 nullrev,
27 nullrev,
28 short,
28 short,
29 )
29 )
30 from . import (
30 from . import (
31 bundle2,
31 bundle2,
32 changegroup,
32 changegroup,
33 cmdutil,
33 cmdutil,
34 color,
34 color,
35 commands,
35 commands,
36 context,
36 context,
37 dagparser,
37 dagparser,
38 dagutil,
38 dagutil,
39 encoding,
39 encoding,
40 error,
40 error,
41 exchange,
41 exchange,
42 extensions,
42 extensions,
43 fileset,
43 fileset,
44 formatter,
44 formatter,
45 hg,
45 hg,
46 localrepo,
46 localrepo,
47 lock as lockmod,
47 lock as lockmod,
48 merge as mergemod,
48 merge as mergemod,
49 obsolete,
49 obsolete,
50 policy,
50 policy,
51 pvec,
51 pvec,
52 pycompat,
52 pycompat,
53 repair,
53 repair,
54 revlog,
54 revlog,
55 revset,
55 revset,
56 revsetlang,
56 revsetlang,
57 scmutil,
57 scmutil,
58 setdiscovery,
58 setdiscovery,
59 simplemerge,
59 simplemerge,
60 smartset,
60 smartset,
61 sslutil,
61 sslutil,
62 streamclone,
62 streamclone,
63 templater,
63 templater,
64 treediscovery,
64 treediscovery,
65 upgrade,
65 util,
66 util,
66 vfs as vfsmod,
67 vfs as vfsmod,
67 )
68 )
68
69
69 release = lockmod.release
70 release = lockmod.release
70
71
71 # We reuse the command table from commands because it is easier than
72 # We reuse the command table from commands because it is easier than
72 # teaching dispatch about multiple tables.
73 # teaching dispatch about multiple tables.
73 command = cmdutil.command(commands.table)
74 command = cmdutil.command(commands.table)
74
75
75 @command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
76 @command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
76 def debugancestor(ui, repo, *args):
77 def debugancestor(ui, repo, *args):
77 """find the ancestor revision of two revisions in a given index"""
78 """find the ancestor revision of two revisions in a given index"""
78 if len(args) == 3:
79 if len(args) == 3:
79 index, rev1, rev2 = args
80 index, rev1, rev2 = args
80 r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
81 r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
81 lookup = r.lookup
82 lookup = r.lookup
82 elif len(args) == 2:
83 elif len(args) == 2:
83 if not repo:
84 if not repo:
84 raise error.Abort(_('there is no Mercurial repository here '
85 raise error.Abort(_('there is no Mercurial repository here '
85 '(.hg not found)'))
86 '(.hg not found)'))
86 rev1, rev2 = args
87 rev1, rev2 = args
87 r = repo.changelog
88 r = repo.changelog
88 lookup = repo.lookup
89 lookup = repo.lookup
89 else:
90 else:
90 raise error.Abort(_('either two or three arguments required'))
91 raise error.Abort(_('either two or three arguments required'))
91 a = r.ancestor(lookup(rev1), lookup(rev2))
92 a = r.ancestor(lookup(rev1), lookup(rev2))
92 ui.write('%d:%s\n' % (r.rev(a), hex(a)))
93 ui.write('%d:%s\n' % (r.rev(a), hex(a)))
93
94
94 @command('debugapplystreamclonebundle', [], 'FILE')
95 @command('debugapplystreamclonebundle', [], 'FILE')
95 def debugapplystreamclonebundle(ui, repo, fname):
96 def debugapplystreamclonebundle(ui, repo, fname):
96 """apply a stream clone bundle file"""
97 """apply a stream clone bundle file"""
97 f = hg.openpath(ui, fname)
98 f = hg.openpath(ui, fname)
98 gen = exchange.readbundle(ui, f, fname)
99 gen = exchange.readbundle(ui, f, fname)
99 gen.apply(repo)
100 gen.apply(repo)
100
101
101 @command('debugbuilddag',
102 @command('debugbuilddag',
102 [('m', 'mergeable-file', None, _('add single file mergeable changes')),
103 [('m', 'mergeable-file', None, _('add single file mergeable changes')),
103 ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
104 ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
104 ('n', 'new-file', None, _('add new file at each rev'))],
105 ('n', 'new-file', None, _('add new file at each rev'))],
105 _('[OPTION]... [TEXT]'))
106 _('[OPTION]... [TEXT]'))
106 def debugbuilddag(ui, repo, text=None,
107 def debugbuilddag(ui, repo, text=None,
107 mergeable_file=False,
108 mergeable_file=False,
108 overwritten_file=False,
109 overwritten_file=False,
109 new_file=False):
110 new_file=False):
110 """builds a repo with a given DAG from scratch in the current empty repo
111 """builds a repo with a given DAG from scratch in the current empty repo
111
112
112 The description of the DAG is read from stdin if not given on the
113 The description of the DAG is read from stdin if not given on the
113 command line.
114 command line.
114
115
115 Elements:
116 Elements:
116
117
117 - "+n" is a linear run of n nodes based on the current default parent
118 - "+n" is a linear run of n nodes based on the current default parent
118 - "." is a single node based on the current default parent
119 - "." is a single node based on the current default parent
119 - "$" resets the default parent to null (implied at the start);
120 - "$" resets the default parent to null (implied at the start);
120 otherwise the default parent is always the last node created
121 otherwise the default parent is always the last node created
121 - "<p" sets the default parent to the backref p
122 - "<p" sets the default parent to the backref p
122 - "*p" is a fork at parent p, which is a backref
123 - "*p" is a fork at parent p, which is a backref
123 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
124 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
124 - "/p2" is a merge of the preceding node and p2
125 - "/p2" is a merge of the preceding node and p2
125 - ":tag" defines a local tag for the preceding node
126 - ":tag" defines a local tag for the preceding node
126 - "@branch" sets the named branch for subsequent nodes
127 - "@branch" sets the named branch for subsequent nodes
127 - "#...\\n" is a comment up to the end of the line
128 - "#...\\n" is a comment up to the end of the line
128
129
129 Whitespace between the above elements is ignored.
130 Whitespace between the above elements is ignored.
130
131
131 A backref is either
132 A backref is either
132
133
133 - a number n, which references the node curr-n, where curr is the current
134 - a number n, which references the node curr-n, where curr is the current
134 node, or
135 node, or
135 - the name of a local tag you placed earlier using ":tag", or
136 - the name of a local tag you placed earlier using ":tag", or
136 - empty to denote the default parent.
137 - empty to denote the default parent.
137
138
138 All string valued-elements are either strictly alphanumeric, or must
139 All string valued-elements are either strictly alphanumeric, or must
139 be enclosed in double quotes ("..."), with "\\" as escape character.
140 be enclosed in double quotes ("..."), with "\\" as escape character.
140 """
141 """
141
142
142 if text is None:
143 if text is None:
143 ui.status(_("reading DAG from stdin\n"))
144 ui.status(_("reading DAG from stdin\n"))
144 text = ui.fin.read()
145 text = ui.fin.read()
145
146
146 cl = repo.changelog
147 cl = repo.changelog
147 if len(cl) > 0:
148 if len(cl) > 0:
148 raise error.Abort(_('repository is not empty'))
149 raise error.Abort(_('repository is not empty'))
149
150
150 # determine number of revs in DAG
151 # determine number of revs in DAG
151 total = 0
152 total = 0
152 for type, data in dagparser.parsedag(text):
153 for type, data in dagparser.parsedag(text):
153 if type == 'n':
154 if type == 'n':
154 total += 1
155 total += 1
155
156
156 if mergeable_file:
157 if mergeable_file:
157 linesperrev = 2
158 linesperrev = 2
158 # make a file with k lines per rev
159 # make a file with k lines per rev
159 initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
160 initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
160 initialmergedlines.append("")
161 initialmergedlines.append("")
161
162
162 tags = []
163 tags = []
163
164
164 wlock = lock = tr = None
165 wlock = lock = tr = None
165 try:
166 try:
166 wlock = repo.wlock()
167 wlock = repo.wlock()
167 lock = repo.lock()
168 lock = repo.lock()
168 tr = repo.transaction("builddag")
169 tr = repo.transaction("builddag")
169
170
170 at = -1
171 at = -1
171 atbranch = 'default'
172 atbranch = 'default'
172 nodeids = []
173 nodeids = []
173 id = 0
174 id = 0
174 ui.progress(_('building'), id, unit=_('revisions'), total=total)
175 ui.progress(_('building'), id, unit=_('revisions'), total=total)
175 for type, data in dagparser.parsedag(text):
176 for type, data in dagparser.parsedag(text):
176 if type == 'n':
177 if type == 'n':
177 ui.note(('node %s\n' % str(data)))
178 ui.note(('node %s\n' % str(data)))
178 id, ps = data
179 id, ps = data
179
180
180 files = []
181 files = []
181 fctxs = {}
182 fctxs = {}
182
183
183 p2 = None
184 p2 = None
184 if mergeable_file:
185 if mergeable_file:
185 fn = "mf"
186 fn = "mf"
186 p1 = repo[ps[0]]
187 p1 = repo[ps[0]]
187 if len(ps) > 1:
188 if len(ps) > 1:
188 p2 = repo[ps[1]]
189 p2 = repo[ps[1]]
189 pa = p1.ancestor(p2)
190 pa = p1.ancestor(p2)
190 base, local, other = [x[fn].data() for x in (pa, p1,
191 base, local, other = [x[fn].data() for x in (pa, p1,
191 p2)]
192 p2)]
192 m3 = simplemerge.Merge3Text(base, local, other)
193 m3 = simplemerge.Merge3Text(base, local, other)
193 ml = [l.strip() for l in m3.merge_lines()]
194 ml = [l.strip() for l in m3.merge_lines()]
194 ml.append("")
195 ml.append("")
195 elif at > 0:
196 elif at > 0:
196 ml = p1[fn].data().split("\n")
197 ml = p1[fn].data().split("\n")
197 else:
198 else:
198 ml = initialmergedlines
199 ml = initialmergedlines
199 ml[id * linesperrev] += " r%i" % id
200 ml[id * linesperrev] += " r%i" % id
200 mergedtext = "\n".join(ml)
201 mergedtext = "\n".join(ml)
201 files.append(fn)
202 files.append(fn)
202 fctxs[fn] = context.memfilectx(repo, fn, mergedtext)
203 fctxs[fn] = context.memfilectx(repo, fn, mergedtext)
203
204
204 if overwritten_file:
205 if overwritten_file:
205 fn = "of"
206 fn = "of"
206 files.append(fn)
207 files.append(fn)
207 fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
208 fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
208
209
209 if new_file:
210 if new_file:
210 fn = "nf%i" % id
211 fn = "nf%i" % id
211 files.append(fn)
212 files.append(fn)
212 fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
213 fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
213 if len(ps) > 1:
214 if len(ps) > 1:
214 if not p2:
215 if not p2:
215 p2 = repo[ps[1]]
216 p2 = repo[ps[1]]
216 for fn in p2:
217 for fn in p2:
217 if fn.startswith("nf"):
218 if fn.startswith("nf"):
218 files.append(fn)
219 files.append(fn)
219 fctxs[fn] = p2[fn]
220 fctxs[fn] = p2[fn]
220
221
221 def fctxfn(repo, cx, path):
222 def fctxfn(repo, cx, path):
222 return fctxs.get(path)
223 return fctxs.get(path)
223
224
224 if len(ps) == 0 or ps[0] < 0:
225 if len(ps) == 0 or ps[0] < 0:
225 pars = [None, None]
226 pars = [None, None]
226 elif len(ps) == 1:
227 elif len(ps) == 1:
227 pars = [nodeids[ps[0]], None]
228 pars = [nodeids[ps[0]], None]
228 else:
229 else:
229 pars = [nodeids[p] for p in ps]
230 pars = [nodeids[p] for p in ps]
230 cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
231 cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
231 date=(id, 0),
232 date=(id, 0),
232 user="debugbuilddag",
233 user="debugbuilddag",
233 extra={'branch': atbranch})
234 extra={'branch': atbranch})
234 nodeid = repo.commitctx(cx)
235 nodeid = repo.commitctx(cx)
235 nodeids.append(nodeid)
236 nodeids.append(nodeid)
236 at = id
237 at = id
237 elif type == 'l':
238 elif type == 'l':
238 id, name = data
239 id, name = data
239 ui.note(('tag %s\n' % name))
240 ui.note(('tag %s\n' % name))
240 tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
241 tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
241 elif type == 'a':
242 elif type == 'a':
242 ui.note(('branch %s\n' % data))
243 ui.note(('branch %s\n' % data))
243 atbranch = data
244 atbranch = data
244 ui.progress(_('building'), id, unit=_('revisions'), total=total)
245 ui.progress(_('building'), id, unit=_('revisions'), total=total)
245 tr.close()
246 tr.close()
246
247
247 if tags:
248 if tags:
248 repo.vfs.write("localtags", "".join(tags))
249 repo.vfs.write("localtags", "".join(tags))
249 finally:
250 finally:
250 ui.progress(_('building'), None)
251 ui.progress(_('building'), None)
251 release(tr, lock, wlock)
252 release(tr, lock, wlock)
252
253
253 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
254 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
254 indent_string = ' ' * indent
255 indent_string = ' ' * indent
255 if all:
256 if all:
256 ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
257 ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
257 % indent_string)
258 % indent_string)
258
259
259 def showchunks(named):
260 def showchunks(named):
260 ui.write("\n%s%s\n" % (indent_string, named))
261 ui.write("\n%s%s\n" % (indent_string, named))
261 chain = None
262 chain = None
262 for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
263 for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
263 node = chunkdata['node']
264 node = chunkdata['node']
264 p1 = chunkdata['p1']
265 p1 = chunkdata['p1']
265 p2 = chunkdata['p2']
266 p2 = chunkdata['p2']
266 cs = chunkdata['cs']
267 cs = chunkdata['cs']
267 deltabase = chunkdata['deltabase']
268 deltabase = chunkdata['deltabase']
268 delta = chunkdata['delta']
269 delta = chunkdata['delta']
269 ui.write("%s%s %s %s %s %s %s\n" %
270 ui.write("%s%s %s %s %s %s %s\n" %
270 (indent_string, hex(node), hex(p1), hex(p2),
271 (indent_string, hex(node), hex(p1), hex(p2),
271 hex(cs), hex(deltabase), len(delta)))
272 hex(cs), hex(deltabase), len(delta)))
272 chain = node
273 chain = node
273
274
274 chunkdata = gen.changelogheader()
275 chunkdata = gen.changelogheader()
275 showchunks("changelog")
276 showchunks("changelog")
276 chunkdata = gen.manifestheader()
277 chunkdata = gen.manifestheader()
277 showchunks("manifest")
278 showchunks("manifest")
278 for chunkdata in iter(gen.filelogheader, {}):
279 for chunkdata in iter(gen.filelogheader, {}):
279 fname = chunkdata['filename']
280 fname = chunkdata['filename']
280 showchunks(fname)
281 showchunks(fname)
281 else:
282 else:
282 if isinstance(gen, bundle2.unbundle20):
283 if isinstance(gen, bundle2.unbundle20):
283 raise error.Abort(_('use debugbundle2 for this file'))
284 raise error.Abort(_('use debugbundle2 for this file'))
284 chunkdata = gen.changelogheader()
285 chunkdata = gen.changelogheader()
285 chain = None
286 chain = None
286 for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
287 for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
287 node = chunkdata['node']
288 node = chunkdata['node']
288 ui.write("%s%s\n" % (indent_string, hex(node)))
289 ui.write("%s%s\n" % (indent_string, hex(node)))
289 chain = node
290 chain = node
290
291
291 def _debugbundle2(ui, gen, all=None, **opts):
292 def _debugbundle2(ui, gen, all=None, **opts):
292 """lists the contents of a bundle2"""
293 """lists the contents of a bundle2"""
293 if not isinstance(gen, bundle2.unbundle20):
294 if not isinstance(gen, bundle2.unbundle20):
294 raise error.Abort(_('not a bundle2 file'))
295 raise error.Abort(_('not a bundle2 file'))
295 ui.write(('Stream params: %s\n' % repr(gen.params)))
296 ui.write(('Stream params: %s\n' % repr(gen.params)))
296 for part in gen.iterparts():
297 for part in gen.iterparts():
297 ui.write('%s -- %r\n' % (part.type, repr(part.params)))
298 ui.write('%s -- %r\n' % (part.type, repr(part.params)))
298 if part.type == 'changegroup':
299 if part.type == 'changegroup':
299 version = part.params.get('version', '01')
300 version = part.params.get('version', '01')
300 cg = changegroup.getunbundler(version, part, 'UN')
301 cg = changegroup.getunbundler(version, part, 'UN')
301 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
302 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
302
303
303 @command('debugbundle',
304 @command('debugbundle',
304 [('a', 'all', None, _('show all details')),
305 [('a', 'all', None, _('show all details')),
305 ('', 'spec', None, _('print the bundlespec of the bundle'))],
306 ('', 'spec', None, _('print the bundlespec of the bundle'))],
306 _('FILE'),
307 _('FILE'),
307 norepo=True)
308 norepo=True)
308 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
309 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
309 """lists the contents of a bundle"""
310 """lists the contents of a bundle"""
310 with hg.openpath(ui, bundlepath) as f:
311 with hg.openpath(ui, bundlepath) as f:
311 if spec:
312 if spec:
312 spec = exchange.getbundlespec(ui, f)
313 spec = exchange.getbundlespec(ui, f)
313 ui.write('%s\n' % spec)
314 ui.write('%s\n' % spec)
314 return
315 return
315
316
316 gen = exchange.readbundle(ui, f, bundlepath)
317 gen = exchange.readbundle(ui, f, bundlepath)
317 if isinstance(gen, bundle2.unbundle20):
318 if isinstance(gen, bundle2.unbundle20):
318 return _debugbundle2(ui, gen, all=all, **opts)
319 return _debugbundle2(ui, gen, all=all, **opts)
319 _debugchangegroup(ui, gen, all=all, **opts)
320 _debugchangegroup(ui, gen, all=all, **opts)
320
321
321 @command('debugcheckstate', [], '')
322 @command('debugcheckstate', [], '')
322 def debugcheckstate(ui, repo):
323 def debugcheckstate(ui, repo):
323 """validate the correctness of the current dirstate"""
324 """validate the correctness of the current dirstate"""
324 parent1, parent2 = repo.dirstate.parents()
325 parent1, parent2 = repo.dirstate.parents()
325 m1 = repo[parent1].manifest()
326 m1 = repo[parent1].manifest()
326 m2 = repo[parent2].manifest()
327 m2 = repo[parent2].manifest()
327 errors = 0
328 errors = 0
328 for f in repo.dirstate:
329 for f in repo.dirstate:
329 state = repo.dirstate[f]
330 state = repo.dirstate[f]
330 if state in "nr" and f not in m1:
331 if state in "nr" and f not in m1:
331 ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
332 ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
332 errors += 1
333 errors += 1
333 if state in "a" and f in m1:
334 if state in "a" and f in m1:
334 ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
335 ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
335 errors += 1
336 errors += 1
336 if state in "m" and f not in m1 and f not in m2:
337 if state in "m" and f not in m1 and f not in m2:
337 ui.warn(_("%s in state %s, but not in either manifest\n") %
338 ui.warn(_("%s in state %s, but not in either manifest\n") %
338 (f, state))
339 (f, state))
339 errors += 1
340 errors += 1
340 for f in m1:
341 for f in m1:
341 state = repo.dirstate[f]
342 state = repo.dirstate[f]
342 if state not in "nrm":
343 if state not in "nrm":
343 ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
344 ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
344 errors += 1
345 errors += 1
345 if errors:
346 if errors:
346 error = _(".hg/dirstate inconsistent with current parent's manifest")
347 error = _(".hg/dirstate inconsistent with current parent's manifest")
347 raise error.Abort(error)
348 raise error.Abort(error)
348
349
349 @command('debugcolor',
350 @command('debugcolor',
350 [('', 'style', None, _('show all configured styles'))],
351 [('', 'style', None, _('show all configured styles'))],
351 'hg debugcolor')
352 'hg debugcolor')
352 def debugcolor(ui, repo, **opts):
353 def debugcolor(ui, repo, **opts):
353 """show available color, effects or style"""
354 """show available color, effects or style"""
354 ui.write(('color mode: %s\n') % ui._colormode)
355 ui.write(('color mode: %s\n') % ui._colormode)
355 if opts.get('style'):
356 if opts.get('style'):
356 return _debugdisplaystyle(ui)
357 return _debugdisplaystyle(ui)
357 else:
358 else:
358 return _debugdisplaycolor(ui)
359 return _debugdisplaycolor(ui)
359
360
360 def _debugdisplaycolor(ui):
361 def _debugdisplaycolor(ui):
361 ui = ui.copy()
362 ui = ui.copy()
362 ui._styles.clear()
363 ui._styles.clear()
363 for effect in color._activeeffects(ui).keys():
364 for effect in color._activeeffects(ui).keys():
364 ui._styles[effect] = effect
365 ui._styles[effect] = effect
365 if ui._terminfoparams:
366 if ui._terminfoparams:
366 for k, v in ui.configitems('color'):
367 for k, v in ui.configitems('color'):
367 if k.startswith('color.'):
368 if k.startswith('color.'):
368 ui._styles[k] = k[6:]
369 ui._styles[k] = k[6:]
369 elif k.startswith('terminfo.'):
370 elif k.startswith('terminfo.'):
370 ui._styles[k] = k[9:]
371 ui._styles[k] = k[9:]
371 ui.write(_('available colors:\n'))
372 ui.write(_('available colors:\n'))
372 # sort label with a '_' after the other to group '_background' entry.
373 # sort label with a '_' after the other to group '_background' entry.
373 items = sorted(ui._styles.items(),
374 items = sorted(ui._styles.items(),
374 key=lambda i: ('_' in i[0], i[0], i[1]))
375 key=lambda i: ('_' in i[0], i[0], i[1]))
375 for colorname, label in items:
376 for colorname, label in items:
376 ui.write(('%s\n') % colorname, label=label)
377 ui.write(('%s\n') % colorname, label=label)
377
378
378 def _debugdisplaystyle(ui):
379 def _debugdisplaystyle(ui):
379 ui.write(_('available style:\n'))
380 ui.write(_('available style:\n'))
380 width = max(len(s) for s in ui._styles)
381 width = max(len(s) for s in ui._styles)
381 for label, effects in sorted(ui._styles.items()):
382 for label, effects in sorted(ui._styles.items()):
382 ui.write('%s' % label, label=label)
383 ui.write('%s' % label, label=label)
383 if effects:
384 if effects:
384 # 50
385 # 50
385 ui.write(': ')
386 ui.write(': ')
386 ui.write(' ' * (max(0, width - len(label))))
387 ui.write(' ' * (max(0, width - len(label))))
387 ui.write(', '.join(ui.label(e, e) for e in effects.split()))
388 ui.write(', '.join(ui.label(e, e) for e in effects.split()))
388 ui.write('\n')
389 ui.write('\n')
389
390
390 @command('debugcommands', [], _('[COMMAND]'), norepo=True)
391 @command('debugcommands', [], _('[COMMAND]'), norepo=True)
391 def debugcommands(ui, cmd='', *args):
392 def debugcommands(ui, cmd='', *args):
392 """list all available commands and options"""
393 """list all available commands and options"""
393 for cmd, vals in sorted(commands.table.iteritems()):
394 for cmd, vals in sorted(commands.table.iteritems()):
394 cmd = cmd.split('|')[0].strip('^')
395 cmd = cmd.split('|')[0].strip('^')
395 opts = ', '.join([i[1] for i in vals[1]])
396 opts = ', '.join([i[1] for i in vals[1]])
396 ui.write('%s: %s\n' % (cmd, opts))
397 ui.write('%s: %s\n' % (cmd, opts))
397
398
398 @command('debugcomplete',
399 @command('debugcomplete',
399 [('o', 'options', None, _('show the command options'))],
400 [('o', 'options', None, _('show the command options'))],
400 _('[-o] CMD'),
401 _('[-o] CMD'),
401 norepo=True)
402 norepo=True)
402 def debugcomplete(ui, cmd='', **opts):
403 def debugcomplete(ui, cmd='', **opts):
403 """returns the completion list associated with the given command"""
404 """returns the completion list associated with the given command"""
404
405
405 if opts.get('options'):
406 if opts.get('options'):
406 options = []
407 options = []
407 otables = [commands.globalopts]
408 otables = [commands.globalopts]
408 if cmd:
409 if cmd:
409 aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
410 aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
410 otables.append(entry[1])
411 otables.append(entry[1])
411 for t in otables:
412 for t in otables:
412 for o in t:
413 for o in t:
413 if "(DEPRECATED)" in o[3]:
414 if "(DEPRECATED)" in o[3]:
414 continue
415 continue
415 if o[0]:
416 if o[0]:
416 options.append('-%s' % o[0])
417 options.append('-%s' % o[0])
417 options.append('--%s' % o[1])
418 options.append('--%s' % o[1])
418 ui.write("%s\n" % "\n".join(options))
419 ui.write("%s\n" % "\n".join(options))
419 return
420 return
420
421
421 cmdlist, unused_allcmds = cmdutil.findpossible(cmd, commands.table)
422 cmdlist, unused_allcmds = cmdutil.findpossible(cmd, commands.table)
422 if ui.verbose:
423 if ui.verbose:
423 cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
424 cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
424 ui.write("%s\n" % "\n".join(sorted(cmdlist)))
425 ui.write("%s\n" % "\n".join(sorted(cmdlist)))
425
426
426 @command('debugcreatestreamclonebundle', [], 'FILE')
427 @command('debugcreatestreamclonebundle', [], 'FILE')
427 def debugcreatestreamclonebundle(ui, repo, fname):
428 def debugcreatestreamclonebundle(ui, repo, fname):
428 """create a stream clone bundle file
429 """create a stream clone bundle file
429
430
430 Stream bundles are special bundles that are essentially archives of
431 Stream bundles are special bundles that are essentially archives of
431 revlog files. They are commonly used for cloning very quickly.
432 revlog files. They are commonly used for cloning very quickly.
432 """
433 """
433 requirements, gen = streamclone.generatebundlev1(repo)
434 requirements, gen = streamclone.generatebundlev1(repo)
434 changegroup.writechunks(ui, gen, fname)
435 changegroup.writechunks(ui, gen, fname)
435
436
436 ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
437 ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
437
438
438 @command('debugdag',
439 @command('debugdag',
439 [('t', 'tags', None, _('use tags as labels')),
440 [('t', 'tags', None, _('use tags as labels')),
440 ('b', 'branches', None, _('annotate with branch names')),
441 ('b', 'branches', None, _('annotate with branch names')),
441 ('', 'dots', None, _('use dots for runs')),
442 ('', 'dots', None, _('use dots for runs')),
442 ('s', 'spaces', None, _('separate elements by spaces'))],
443 ('s', 'spaces', None, _('separate elements by spaces'))],
443 _('[OPTION]... [FILE [REV]...]'),
444 _('[OPTION]... [FILE [REV]...]'),
444 optionalrepo=True)
445 optionalrepo=True)
445 def debugdag(ui, repo, file_=None, *revs, **opts):
446 def debugdag(ui, repo, file_=None, *revs, **opts):
446 """format the changelog or an index DAG as a concise textual description
447 """format the changelog or an index DAG as a concise textual description
447
448
448 If you pass a revlog index, the revlog's DAG is emitted. If you list
449 If you pass a revlog index, the revlog's DAG is emitted. If you list
449 revision numbers, they get labeled in the output as rN.
450 revision numbers, they get labeled in the output as rN.
450
451
451 Otherwise, the changelog DAG of the current repo is emitted.
452 Otherwise, the changelog DAG of the current repo is emitted.
452 """
453 """
453 spaces = opts.get('spaces')
454 spaces = opts.get('spaces')
454 dots = opts.get('dots')
455 dots = opts.get('dots')
455 if file_:
456 if file_:
456 rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
457 rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
457 file_)
458 file_)
458 revs = set((int(r) for r in revs))
459 revs = set((int(r) for r in revs))
459 def events():
460 def events():
460 for r in rlog:
461 for r in rlog:
461 yield 'n', (r, list(p for p in rlog.parentrevs(r)
462 yield 'n', (r, list(p for p in rlog.parentrevs(r)
462 if p != -1))
463 if p != -1))
463 if r in revs:
464 if r in revs:
464 yield 'l', (r, "r%i" % r)
465 yield 'l', (r, "r%i" % r)
465 elif repo:
466 elif repo:
466 cl = repo.changelog
467 cl = repo.changelog
467 tags = opts.get('tags')
468 tags = opts.get('tags')
468 branches = opts.get('branches')
469 branches = opts.get('branches')
469 if tags:
470 if tags:
470 labels = {}
471 labels = {}
471 for l, n in repo.tags().items():
472 for l, n in repo.tags().items():
472 labels.setdefault(cl.rev(n), []).append(l)
473 labels.setdefault(cl.rev(n), []).append(l)
473 def events():
474 def events():
474 b = "default"
475 b = "default"
475 for r in cl:
476 for r in cl:
476 if branches:
477 if branches:
477 newb = cl.read(cl.node(r))[5]['branch']
478 newb = cl.read(cl.node(r))[5]['branch']
478 if newb != b:
479 if newb != b:
479 yield 'a', newb
480 yield 'a', newb
480 b = newb
481 b = newb
481 yield 'n', (r, list(p for p in cl.parentrevs(r)
482 yield 'n', (r, list(p for p in cl.parentrevs(r)
482 if p != -1))
483 if p != -1))
483 if tags:
484 if tags:
484 ls = labels.get(r)
485 ls = labels.get(r)
485 if ls:
486 if ls:
486 for l in ls:
487 for l in ls:
487 yield 'l', (r, l)
488 yield 'l', (r, l)
488 else:
489 else:
489 raise error.Abort(_('need repo for changelog dag'))
490 raise error.Abort(_('need repo for changelog dag'))
490
491
491 for line in dagparser.dagtextlines(events(),
492 for line in dagparser.dagtextlines(events(),
492 addspaces=spaces,
493 addspaces=spaces,
493 wraplabels=True,
494 wraplabels=True,
494 wrapannotations=True,
495 wrapannotations=True,
495 wrapnonlinear=dots,
496 wrapnonlinear=dots,
496 usedots=dots,
497 usedots=dots,
497 maxlinewidth=70):
498 maxlinewidth=70):
498 ui.write(line)
499 ui.write(line)
499 ui.write("\n")
500 ui.write("\n")
500
501
501 @command('debugdata', commands.debugrevlogopts, _('-c|-m|FILE REV'))
502 @command('debugdata', commands.debugrevlogopts, _('-c|-m|FILE REV'))
502 def debugdata(ui, repo, file_, rev=None, **opts):
503 def debugdata(ui, repo, file_, rev=None, **opts):
503 """dump the contents of a data file revision"""
504 """dump the contents of a data file revision"""
504 if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
505 if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
505 if rev is not None:
506 if rev is not None:
506 raise error.CommandError('debugdata', _('invalid arguments'))
507 raise error.CommandError('debugdata', _('invalid arguments'))
507 file_, rev = None, file_
508 file_, rev = None, file_
508 elif rev is None:
509 elif rev is None:
509 raise error.CommandError('debugdata', _('invalid arguments'))
510 raise error.CommandError('debugdata', _('invalid arguments'))
510 r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
511 r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
511 try:
512 try:
512 ui.write(r.revision(r.lookup(rev), raw=True))
513 ui.write(r.revision(r.lookup(rev), raw=True))
513 except KeyError:
514 except KeyError:
514 raise error.Abort(_('invalid revision identifier %s') % rev)
515 raise error.Abort(_('invalid revision identifier %s') % rev)
515
516
516 @command('debugdate',
517 @command('debugdate',
517 [('e', 'extended', None, _('try extended date formats'))],
518 [('e', 'extended', None, _('try extended date formats'))],
518 _('[-e] DATE [RANGE]'),
519 _('[-e] DATE [RANGE]'),
519 norepo=True, optionalrepo=True)
520 norepo=True, optionalrepo=True)
520 def debugdate(ui, date, range=None, **opts):
521 def debugdate(ui, date, range=None, **opts):
521 """parse and display a date"""
522 """parse and display a date"""
522 if opts["extended"]:
523 if opts["extended"]:
523 d = util.parsedate(date, util.extendeddateformats)
524 d = util.parsedate(date, util.extendeddateformats)
524 else:
525 else:
525 d = util.parsedate(date)
526 d = util.parsedate(date)
526 ui.write(("internal: %s %s\n") % d)
527 ui.write(("internal: %s %s\n") % d)
527 ui.write(("standard: %s\n") % util.datestr(d))
528 ui.write(("standard: %s\n") % util.datestr(d))
528 if range:
529 if range:
529 m = util.matchdate(range)
530 m = util.matchdate(range)
530 ui.write(("match: %s\n") % m(d[0]))
531 ui.write(("match: %s\n") % m(d[0]))
531
532
532 @command('debugdeltachain',
533 @command('debugdeltachain',
533 commands.debugrevlogopts + commands.formatteropts,
534 commands.debugrevlogopts + commands.formatteropts,
534 _('-c|-m|FILE'),
535 _('-c|-m|FILE'),
535 optionalrepo=True)
536 optionalrepo=True)
536 def debugdeltachain(ui, repo, file_=None, **opts):
537 def debugdeltachain(ui, repo, file_=None, **opts):
537 """dump information about delta chains in a revlog
538 """dump information about delta chains in a revlog
538
539
539 Output can be templatized. Available template keywords are:
540 Output can be templatized. Available template keywords are:
540
541
541 :``rev``: revision number
542 :``rev``: revision number
542 :``chainid``: delta chain identifier (numbered by unique base)
543 :``chainid``: delta chain identifier (numbered by unique base)
543 :``chainlen``: delta chain length to this revision
544 :``chainlen``: delta chain length to this revision
544 :``prevrev``: previous revision in delta chain
545 :``prevrev``: previous revision in delta chain
545 :``deltatype``: role of delta / how it was computed
546 :``deltatype``: role of delta / how it was computed
546 :``compsize``: compressed size of revision
547 :``compsize``: compressed size of revision
547 :``uncompsize``: uncompressed size of revision
548 :``uncompsize``: uncompressed size of revision
548 :``chainsize``: total size of compressed revisions in chain
549 :``chainsize``: total size of compressed revisions in chain
549 :``chainratio``: total chain size divided by uncompressed revision size
550 :``chainratio``: total chain size divided by uncompressed revision size
550 (new delta chains typically start at ratio 2.00)
551 (new delta chains typically start at ratio 2.00)
551 :``lindist``: linear distance from base revision in delta chain to end
552 :``lindist``: linear distance from base revision in delta chain to end
552 of this revision
553 of this revision
553 :``extradist``: total size of revisions not part of this delta chain from
554 :``extradist``: total size of revisions not part of this delta chain from
554 base of delta chain to end of this revision; a measurement
555 base of delta chain to end of this revision; a measurement
555 of how much extra data we need to read/seek across to read
556 of how much extra data we need to read/seek across to read
556 the delta chain for this revision
557 the delta chain for this revision
557 :``extraratio``: extradist divided by chainsize; another representation of
558 :``extraratio``: extradist divided by chainsize; another representation of
558 how much unrelated data is needed to load this delta chain
559 how much unrelated data is needed to load this delta chain
559 """
560 """
560 r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
561 r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
561 index = r.index
562 index = r.index
562 generaldelta = r.version & revlog.REVLOGGENERALDELTA
563 generaldelta = r.version & revlog.REVLOGGENERALDELTA
563
564
564 def revinfo(rev):
565 def revinfo(rev):
565 e = index[rev]
566 e = index[rev]
566 compsize = e[1]
567 compsize = e[1]
567 uncompsize = e[2]
568 uncompsize = e[2]
568 chainsize = 0
569 chainsize = 0
569
570
570 if generaldelta:
571 if generaldelta:
571 if e[3] == e[5]:
572 if e[3] == e[5]:
572 deltatype = 'p1'
573 deltatype = 'p1'
573 elif e[3] == e[6]:
574 elif e[3] == e[6]:
574 deltatype = 'p2'
575 deltatype = 'p2'
575 elif e[3] == rev - 1:
576 elif e[3] == rev - 1:
576 deltatype = 'prev'
577 deltatype = 'prev'
577 elif e[3] == rev:
578 elif e[3] == rev:
578 deltatype = 'base'
579 deltatype = 'base'
579 else:
580 else:
580 deltatype = 'other'
581 deltatype = 'other'
581 else:
582 else:
582 if e[3] == rev:
583 if e[3] == rev:
583 deltatype = 'base'
584 deltatype = 'base'
584 else:
585 else:
585 deltatype = 'prev'
586 deltatype = 'prev'
586
587
587 chain = r._deltachain(rev)[0]
588 chain = r._deltachain(rev)[0]
588 for iterrev in chain:
589 for iterrev in chain:
589 e = index[iterrev]
590 e = index[iterrev]
590 chainsize += e[1]
591 chainsize += e[1]
591
592
592 return compsize, uncompsize, deltatype, chain, chainsize
593 return compsize, uncompsize, deltatype, chain, chainsize
593
594
594 fm = ui.formatter('debugdeltachain', opts)
595 fm = ui.formatter('debugdeltachain', opts)
595
596
596 fm.plain(' rev chain# chainlen prev delta '
597 fm.plain(' rev chain# chainlen prev delta '
597 'size rawsize chainsize ratio lindist extradist '
598 'size rawsize chainsize ratio lindist extradist '
598 'extraratio\n')
599 'extraratio\n')
599
600
600 chainbases = {}
601 chainbases = {}
601 for rev in r:
602 for rev in r:
602 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
603 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
603 chainbase = chain[0]
604 chainbase = chain[0]
604 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
605 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
605 basestart = r.start(chainbase)
606 basestart = r.start(chainbase)
606 revstart = r.start(rev)
607 revstart = r.start(rev)
607 lineardist = revstart + comp - basestart
608 lineardist = revstart + comp - basestart
608 extradist = lineardist - chainsize
609 extradist = lineardist - chainsize
609 try:
610 try:
610 prevrev = chain[-2]
611 prevrev = chain[-2]
611 except IndexError:
612 except IndexError:
612 prevrev = -1
613 prevrev = -1
613
614
614 chainratio = float(chainsize) / float(uncomp)
615 chainratio = float(chainsize) / float(uncomp)
615 extraratio = float(extradist) / float(chainsize)
616 extraratio = float(extradist) / float(chainsize)
616
617
617 fm.startitem()
618 fm.startitem()
618 fm.write('rev chainid chainlen prevrev deltatype compsize '
619 fm.write('rev chainid chainlen prevrev deltatype compsize '
619 'uncompsize chainsize chainratio lindist extradist '
620 'uncompsize chainsize chainratio lindist extradist '
620 'extraratio',
621 'extraratio',
621 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
622 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
622 rev, chainid, len(chain), prevrev, deltatype, comp,
623 rev, chainid, len(chain), prevrev, deltatype, comp,
623 uncomp, chainsize, chainratio, lineardist, extradist,
624 uncomp, chainsize, chainratio, lineardist, extradist,
624 extraratio,
625 extraratio,
625 rev=rev, chainid=chainid, chainlen=len(chain),
626 rev=rev, chainid=chainid, chainlen=len(chain),
626 prevrev=prevrev, deltatype=deltatype, compsize=comp,
627 prevrev=prevrev, deltatype=deltatype, compsize=comp,
627 uncompsize=uncomp, chainsize=chainsize,
628 uncompsize=uncomp, chainsize=chainsize,
628 chainratio=chainratio, lindist=lineardist,
629 chainratio=chainratio, lindist=lineardist,
629 extradist=extradist, extraratio=extraratio)
630 extradist=extradist, extraratio=extraratio)
630
631
631 fm.end()
632 fm.end()
632
633
633 @command('debugdirstate|debugstate',
634 @command('debugdirstate|debugstate',
634 [('', 'nodates', None, _('do not display the saved mtime')),
635 [('', 'nodates', None, _('do not display the saved mtime')),
635 ('', 'datesort', None, _('sort by saved mtime'))],
636 ('', 'datesort', None, _('sort by saved mtime'))],
636 _('[OPTION]...'))
637 _('[OPTION]...'))
637 def debugstate(ui, repo, **opts):
638 def debugstate(ui, repo, **opts):
638 """show the contents of the current dirstate"""
639 """show the contents of the current dirstate"""
639
640
640 nodates = opts.get('nodates')
641 nodates = opts.get('nodates')
641 datesort = opts.get('datesort')
642 datesort = opts.get('datesort')
642
643
643 timestr = ""
644 timestr = ""
644 if datesort:
645 if datesort:
645 keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
646 keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
646 else:
647 else:
647 keyfunc = None # sort by filename
648 keyfunc = None # sort by filename
648 for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
649 for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
649 if ent[3] == -1:
650 if ent[3] == -1:
650 timestr = 'unset '
651 timestr = 'unset '
651 elif nodates:
652 elif nodates:
652 timestr = 'set '
653 timestr = 'set '
653 else:
654 else:
654 timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
655 timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
655 time.localtime(ent[3]))
656 time.localtime(ent[3]))
656 if ent[1] & 0o20000:
657 if ent[1] & 0o20000:
657 mode = 'lnk'
658 mode = 'lnk'
658 else:
659 else:
659 mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
660 mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
660 ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
661 ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
661 for f in repo.dirstate.copies():
662 for f in repo.dirstate.copies():
662 ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
663 ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
663
664
664 @command('debugdiscovery',
665 @command('debugdiscovery',
665 [('', 'old', None, _('use old-style discovery')),
666 [('', 'old', None, _('use old-style discovery')),
666 ('', 'nonheads', None,
667 ('', 'nonheads', None,
667 _('use old-style discovery with non-heads included')),
668 _('use old-style discovery with non-heads included')),
668 ] + commands.remoteopts,
669 ] + commands.remoteopts,
669 _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
670 _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
670 def debugdiscovery(ui, repo, remoteurl="default", **opts):
671 def debugdiscovery(ui, repo, remoteurl="default", **opts):
671 """runs the changeset discovery protocol in isolation"""
672 """runs the changeset discovery protocol in isolation"""
672 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
673 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
673 opts.get('branch'))
674 opts.get('branch'))
674 remote = hg.peer(repo, opts, remoteurl)
675 remote = hg.peer(repo, opts, remoteurl)
675 ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))
676 ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))
676
677
677 # make sure tests are repeatable
678 # make sure tests are repeatable
678 random.seed(12323)
679 random.seed(12323)
679
680
680 def doit(localheads, remoteheads, remote=remote):
681 def doit(localheads, remoteheads, remote=remote):
681 if opts.get('old'):
682 if opts.get('old'):
682 if localheads:
683 if localheads:
683 raise error.Abort('cannot use localheads with old style '
684 raise error.Abort('cannot use localheads with old style '
684 'discovery')
685 'discovery')
685 if not util.safehasattr(remote, 'branches'):
686 if not util.safehasattr(remote, 'branches'):
686 # enable in-client legacy support
687 # enable in-client legacy support
687 remote = localrepo.locallegacypeer(remote.local())
688 remote = localrepo.locallegacypeer(remote.local())
688 common, _in, hds = treediscovery.findcommonincoming(repo, remote,
689 common, _in, hds = treediscovery.findcommonincoming(repo, remote,
689 force=True)
690 force=True)
690 common = set(common)
691 common = set(common)
691 if not opts.get('nonheads'):
692 if not opts.get('nonheads'):
692 ui.write(("unpruned common: %s\n") %
693 ui.write(("unpruned common: %s\n") %
693 " ".join(sorted(short(n) for n in common)))
694 " ".join(sorted(short(n) for n in common)))
694 dag = dagutil.revlogdag(repo.changelog)
695 dag = dagutil.revlogdag(repo.changelog)
695 all = dag.ancestorset(dag.internalizeall(common))
696 all = dag.ancestorset(dag.internalizeall(common))
696 common = dag.externalizeall(dag.headsetofconnecteds(all))
697 common = dag.externalizeall(dag.headsetofconnecteds(all))
697 else:
698 else:
698 common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
699 common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
699 common = set(common)
700 common = set(common)
700 rheads = set(hds)
701 rheads = set(hds)
701 lheads = set(repo.heads())
702 lheads = set(repo.heads())
702 ui.write(("common heads: %s\n") %
703 ui.write(("common heads: %s\n") %
703 " ".join(sorted(short(n) for n in common)))
704 " ".join(sorted(short(n) for n in common)))
704 if lheads <= common:
705 if lheads <= common:
705 ui.write(("local is subset\n"))
706 ui.write(("local is subset\n"))
706 elif rheads <= common:
707 elif rheads <= common:
707 ui.write(("remote is subset\n"))
708 ui.write(("remote is subset\n"))
708
709
709 serverlogs = opts.get('serverlog')
710 serverlogs = opts.get('serverlog')
710 if serverlogs:
711 if serverlogs:
711 for filename in serverlogs:
712 for filename in serverlogs:
712 with open(filename, 'r') as logfile:
713 with open(filename, 'r') as logfile:
713 line = logfile.readline()
714 line = logfile.readline()
714 while line:
715 while line:
715 parts = line.strip().split(';')
716 parts = line.strip().split(';')
716 op = parts[1]
717 op = parts[1]
717 if op == 'cg':
718 if op == 'cg':
718 pass
719 pass
719 elif op == 'cgss':
720 elif op == 'cgss':
720 doit(parts[2].split(' '), parts[3].split(' '))
721 doit(parts[2].split(' '), parts[3].split(' '))
721 elif op == 'unb':
722 elif op == 'unb':
722 doit(parts[3].split(' '), parts[2].split(' '))
723 doit(parts[3].split(' '), parts[2].split(' '))
723 line = logfile.readline()
724 line = logfile.readline()
724 else:
725 else:
725 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
726 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
726 opts.get('remote_head'))
727 opts.get('remote_head'))
727 localrevs = opts.get('local_head')
728 localrevs = opts.get('local_head')
728 doit(localrevs, remoterevs)
729 doit(localrevs, remoterevs)
729
730
730 @command('debugextensions', commands.formatteropts, [], norepo=True)
731 @command('debugextensions', commands.formatteropts, [], norepo=True)
731 def debugextensions(ui, **opts):
732 def debugextensions(ui, **opts):
732 '''show information about active extensions'''
733 '''show information about active extensions'''
733 exts = extensions.extensions(ui)
734 exts = extensions.extensions(ui)
734 hgver = util.version()
735 hgver = util.version()
735 fm = ui.formatter('debugextensions', opts)
736 fm = ui.formatter('debugextensions', opts)
736 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
737 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
737 isinternal = extensions.ismoduleinternal(extmod)
738 isinternal = extensions.ismoduleinternal(extmod)
738 extsource = pycompat.fsencode(extmod.__file__)
739 extsource = pycompat.fsencode(extmod.__file__)
739 if isinternal:
740 if isinternal:
740 exttestedwith = [] # never expose magic string to users
741 exttestedwith = [] # never expose magic string to users
741 else:
742 else:
742 exttestedwith = getattr(extmod, 'testedwith', '').split()
743 exttestedwith = getattr(extmod, 'testedwith', '').split()
743 extbuglink = getattr(extmod, 'buglink', None)
744 extbuglink = getattr(extmod, 'buglink', None)
744
745
745 fm.startitem()
746 fm.startitem()
746
747
747 if ui.quiet or ui.verbose:
748 if ui.quiet or ui.verbose:
748 fm.write('name', '%s\n', extname)
749 fm.write('name', '%s\n', extname)
749 else:
750 else:
750 fm.write('name', '%s', extname)
751 fm.write('name', '%s', extname)
751 if isinternal or hgver in exttestedwith:
752 if isinternal or hgver in exttestedwith:
752 fm.plain('\n')
753 fm.plain('\n')
753 elif not exttestedwith:
754 elif not exttestedwith:
754 fm.plain(_(' (untested!)\n'))
755 fm.plain(_(' (untested!)\n'))
755 else:
756 else:
756 lasttestedversion = exttestedwith[-1]
757 lasttestedversion = exttestedwith[-1]
757 fm.plain(' (%s!)\n' % lasttestedversion)
758 fm.plain(' (%s!)\n' % lasttestedversion)
758
759
759 fm.condwrite(ui.verbose and extsource, 'source',
760 fm.condwrite(ui.verbose and extsource, 'source',
760 _(' location: %s\n'), extsource or "")
761 _(' location: %s\n'), extsource or "")
761
762
762 if ui.verbose:
763 if ui.verbose:
763 fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal])
764 fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal])
764 fm.data(bundled=isinternal)
765 fm.data(bundled=isinternal)
765
766
766 fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
767 fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
767 _(' tested with: %s\n'),
768 _(' tested with: %s\n'),
768 fm.formatlist(exttestedwith, name='ver'))
769 fm.formatlist(exttestedwith, name='ver'))
769
770
770 fm.condwrite(ui.verbose and extbuglink, 'buglink',
771 fm.condwrite(ui.verbose and extbuglink, 'buglink',
771 _(' bug reporting: %s\n'), extbuglink or "")
772 _(' bug reporting: %s\n'), extbuglink or "")
772
773
773 fm.end()
774 fm.end()
774
775
775 @command('debugfileset',
776 @command('debugfileset',
776 [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
777 [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
777 _('[-r REV] FILESPEC'))
778 _('[-r REV] FILESPEC'))
778 def debugfileset(ui, repo, expr, **opts):
779 def debugfileset(ui, repo, expr, **opts):
779 '''parse and apply a fileset specification'''
780 '''parse and apply a fileset specification'''
780 ctx = scmutil.revsingle(repo, opts.get('rev'), None)
781 ctx = scmutil.revsingle(repo, opts.get('rev'), None)
781 if ui.verbose:
782 if ui.verbose:
782 tree = fileset.parse(expr)
783 tree = fileset.parse(expr)
783 ui.note(fileset.prettyformat(tree), "\n")
784 ui.note(fileset.prettyformat(tree), "\n")
784
785
785 for f in ctx.getfileset(expr):
786 for f in ctx.getfileset(expr):
786 ui.write("%s\n" % f)
787 ui.write("%s\n" % f)
787
788
788 @command('debugfsinfo', [], _('[PATH]'), norepo=True)
789 @command('debugfsinfo', [], _('[PATH]'), norepo=True)
789 def debugfsinfo(ui, path="."):
790 def debugfsinfo(ui, path="."):
790 """show information detected about current filesystem"""
791 """show information detected about current filesystem"""
791 ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
792 ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
792 ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
793 ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
793 ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
794 ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
794 ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
795 ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
795 casesensitive = '(unknown)'
796 casesensitive = '(unknown)'
796 try:
797 try:
797 with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f:
798 with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f:
798 casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
799 casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
799 except OSError:
800 except OSError:
800 pass
801 pass
801 ui.write(('case-sensitive: %s\n') % casesensitive)
802 ui.write(('case-sensitive: %s\n') % casesensitive)
802
803
803 @command('debuggetbundle',
804 @command('debuggetbundle',
804 [('H', 'head', [], _('id of head node'), _('ID')),
805 [('H', 'head', [], _('id of head node'), _('ID')),
805 ('C', 'common', [], _('id of common node'), _('ID')),
806 ('C', 'common', [], _('id of common node'), _('ID')),
806 ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
807 ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
807 _('REPO FILE [-H|-C ID]...'),
808 _('REPO FILE [-H|-C ID]...'),
808 norepo=True)
809 norepo=True)
809 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
810 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
810 """retrieves a bundle from a repo
811 """retrieves a bundle from a repo
811
812
812 Every ID must be a full-length hex node id string. Saves the bundle to the
813 Every ID must be a full-length hex node id string. Saves the bundle to the
813 given file.
814 given file.
814 """
815 """
815 repo = hg.peer(ui, opts, repopath)
816 repo = hg.peer(ui, opts, repopath)
816 if not repo.capable('getbundle'):
817 if not repo.capable('getbundle'):
817 raise error.Abort("getbundle() not supported by target repository")
818 raise error.Abort("getbundle() not supported by target repository")
818 args = {}
819 args = {}
819 if common:
820 if common:
820 args['common'] = [bin(s) for s in common]
821 args['common'] = [bin(s) for s in common]
821 if head:
822 if head:
822 args['heads'] = [bin(s) for s in head]
823 args['heads'] = [bin(s) for s in head]
823 # TODO: get desired bundlecaps from command line.
824 # TODO: get desired bundlecaps from command line.
824 args['bundlecaps'] = None
825 args['bundlecaps'] = None
825 bundle = repo.getbundle('debug', **args)
826 bundle = repo.getbundle('debug', **args)
826
827
827 bundletype = opts.get('type', 'bzip2').lower()
828 bundletype = opts.get('type', 'bzip2').lower()
828 btypes = {'none': 'HG10UN',
829 btypes = {'none': 'HG10UN',
829 'bzip2': 'HG10BZ',
830 'bzip2': 'HG10BZ',
830 'gzip': 'HG10GZ',
831 'gzip': 'HG10GZ',
831 'bundle2': 'HG20'}
832 'bundle2': 'HG20'}
832 bundletype = btypes.get(bundletype)
833 bundletype = btypes.get(bundletype)
833 if bundletype not in bundle2.bundletypes:
834 if bundletype not in bundle2.bundletypes:
834 raise error.Abort(_('unknown bundle type specified with --type'))
835 raise error.Abort(_('unknown bundle type specified with --type'))
835 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
836 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
836
837
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument, display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and,
    if so, shows the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        includepat = getattr(ignore, 'includepat', None)
        if includepat is not None:
            ui.write("%s\n" % includepat)
        else:
            raise error.Abort(_("no ignore patterns found"))
    else:
        for f in files:
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != '.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % f)
                else:
                    ui.write(_("%s is ignored because of "
                               "containing folder %s\n")
                             % (f, ignored))
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % f)

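# Illustrative debugignore session (hypothetical repository and .hgignore;
# the output lines follow the ui.write formats above):
#
#   $ hg debugignore hello.o README
#   hello.o is ignored
#   (ignore rule in .hgignore, line 1: '*.o')
#   README is not ignored
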
@command('debugindex', commands.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugindex(ui, repo, file_=None, **opts):
    """dump the contents of an index file"""
    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    generaldelta = r.version & revlog.REVLOGGENERALDELTA
    if generaldelta:
        basehdr = ' delta'
    else:
        basehdr = '  base'

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        ui.write(("   rev    offset  length " + basehdr + " linkrev"
                  " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        ui.write(("   rev flag   offset   length"
                  "     size " + basehdr + "   link     p1     p2"
                  " %s\n") % "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if generaldelta:
            base = r.deltaparent(i)
        else:
            base = r.chainbase(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [nullid, nullid]
            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), base, r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    base, r.linkrev(i), pr[0], pr[1], shortfn(node)))

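# Sketch of format 0 output (all column values below are invented; the header
# comes from the format string above, and the fourth column reads "delta" or
# "base" depending on the revlog's generaldelta flag):
#
#   $ hg debugindex -c
#      rev    offset  length   base linkrev nodeid       p1           p2
#        0         0      61      0       0 3903775176ed 000000000000 000000000000
#        1        61      65      1       1 7e7c47574d9a 3903775176ed 000000000000
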
@command('debugindexdot', commands.debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    r = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != nullid:
            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write("}\n")

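# For a three-revision linear changelog, the loop above would emit the graph
# below (-1 is the null revision, the parent of rev 0). The result can be
# rendered with graphviz (hypothetical session):
#
#   $ hg debugindexdot -c > index.dot
#   $ dot -Tpng index.dot -o index.png
#
#   digraph G {
#       -1 -> 0
#       0 -> 1
#       1 -> 2
#   }
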
@command('debuginstall', [] + commands.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''

    def writetemp(contents):
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, pycompat.sysstr("wb"))
        f.write(contents)
        f.close()
        return name

    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        encoding.fromlocal("test")
    except error.Abort as inst:
        err = inst
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable)
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_('  TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_('  SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    err = None
    try:
        from . import (
            base85,
            bdiff,
            mpatch,
            osutil,
        )
        dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
    except Exception as inst:
        err = inst
        problems += 1
    fm.condwrite(err, 'extensionserror', " %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = inst
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    fm.write('editor', _("checking commit editor... (%s)\n"), editor)
    cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editor)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems

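# Abridged sketch of a healthy debuginstall run (all paths, versions and
# names below are invented; the messages mirror the fm.write calls above):
#
#   $ hg debuginstall
#   checking encoding (UTF-8)...
#   checking Python version (2.7.13)
#   checking installed modules (/usr/lib/python2.7/site-packages/mercurial)...
#   checking username (Jane Doe <jane@example.com>)
#   no problems detected
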
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('known'):
        raise error.Abort("known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))

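# Hypothetical session (URL and node IDs invented): the first queried node is
# known to the remote, the second is not, so one digit per ID is printed:
#
#   $ hg debugknown http://example.com/repo \
#       aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \
#       bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
#   10
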
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)

@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Returns 0 if no locks are held.

    """

    if opts.get('force_lock'):
        repo.svfs.unlink('lock')
    if opts.get('force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get('force_lock') or opts.get('force_wlock'):
        return 0

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                stat = vfs.lstat(name)
                age = now - stat.st_mtime
                user = util.username(stat.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user, pid)
                    else:
                        locker = 'user %s, process %s, host %s' \
                                 % (user, pid, host)
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held

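# Example report shapes (user, pid and age invented), matching the two
# ui.write formats in report() above:
#
#   $ hg debuglocks
#   lock:  free
#   wlock: user jane, process 12345 (3s)
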
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        if h == nullhex:
            return 'null'
        else:
            return h

    def printrecords(version):
        ui.write(('* version %s records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write(('  ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write(('  other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write(('  local: %s\n' % labels[0]))
                ui.write(('  other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write(('  base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since
    # this command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
        if ui.verbose:
            printrecords(2)

@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in repo.names.iteritems():
        if name != 'branches':
            names.update(ns.listnames(repo))
    names.update(tag for (tag, heads, tip, closed)
                 in repo.branchmap().iterbranches() if not closed)
    completions = set()
    if not args:
        args = ['']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write('\n'.join(sorted(completions)))
    ui.write('\n')

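# Hypothetical completion session (names invented): with a prefix argument,
# only the matching tags, bookmarks and open branch names are printed:
#
#   $ hg debugnamecomplete re
#   release-1.0
#   review
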
@command('debugobsolete',
        [('', 'flags', 0, _('markers flag')),
         ('', 'record-parents', False,
          _('record parent information for the precursor')),
         ('r', 'rev', [], _('display markers relevant to REV')),
         ('', 'index', False, _('display index of the marker')),
         ('', 'delete', [], _('delete markers specified by indices')),
        ] + commands.commitopts2 + commands.formatteropts,
         _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = opts['user'] or ui.username()
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = util.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot use --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') % exc)
            finally:
                tr.release()
        finally:
            l.release()
    else:
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsolete.getmarkers(repo, nodes=nodes))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsolete.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            markerstoiter = obsolete.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()

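# Creating a marker requires full 40-digit hexadecimal node IDs, as enforced
# by parsenodeid() above (the node IDs below are placeholders, not real
# hashes):
#
#   $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \
#       bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
#
# With no arguments, the existing markers are listed instead.
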
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    acceptable = ''
    if opts['normal']:
        acceptable += 'nm'
    if opts['added']:
        acceptable += 'a'
    if opts['removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')

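# Hypothetical completion session (paths invented). Without --full, only the
# next path segment past the given prefix is returned:
#
#   $ hg debugpathcomplete mercurial/d
#   mercurial/dagparser.py
#   mercurial/dirstate.py
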
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if keyinfo:
        key, old, new = keyinfo
        r = target.pushkey(namespace, key, old, new)
        ui.status(str(r) + '\n')
        return not r
    else:
        for k, v in sorted(target.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (util.escapestr(k),
                                   util.escapestr(v)))

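# Listing a namespace takes two arguments; each key is printed with its value,
# tab-separated, per the ui.write format above (URL, bookmark name and node
# below are invented):
#
#   $ hg debugpushkey http://example.com/repo bookmarks
#   mybook	aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
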
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))

@command('debugrebuilddirstate|debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                             'the working copy parent')),
    ],
    _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to
    be tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
            changedfiles = manifestonly | dsnotadded

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)

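# Typical use, per the docstring above: force the next status invocation to
# re-examine file content by resetting the dirstate to the working copy
# parent (hypothetical session):
#
#   $ hg debugrebuilddirstate
#   $ hg status
#
# With --minimal, only entries that disagree with the parent manifest are
# touched, preserving recorded adds and removes.
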
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    repair.rebuildfncache(ui, repo)

@command('debugrename',
    [('r', 'rev', '', _('revision to debug'), _('REV'))],
    _('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    m = scmutil.match(ctx, (file1,) + pats, opts)
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        o = fctx.filelog().renamed(fctx.filenode())
        rel = m.rel(abs)
        if o:
            ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
        else:
            ui.write(_("%s not renamed\n") % rel)

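# Hypothetical session (file names and hash invented), matching the two
# ui.write branches above:
#
#   $ hg debugrename copied.txt plain.txt
#   copied.txt renamed from original.txt:0123456789abcdef0123456789abcdef01234567
#   plain.txt not renamed
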
@command('debugrevlog', commands.debugrevlogopts +
    [('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    if opts.get("dump"):
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start   end deltastart base   p1   p2"
                  " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.REVLOGNGINLINEDATA:
        flags.append('inline')
    if v & revlog.REVLOGGENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    nummerges = 0
    numfull = 0
    numprev = 0
    nump1 = 0
    nump2 = 0
    numother = 0
    nump1prev = 0
    nump2prev = 0
    chainlengths = []

    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            chainlengths.append(0)
            numfull += 1
            addsize(size, fullsize)
        else:
            chainlengths.append(chainlengths[delta] + 1)
            addsize(size, deltasize)
            if delta == rev - 1:
                numprev += 1
                if delta == p1:
                    nump1prev += 1
                elif delta == p2:
                    nump2prev += 1
            elif delta == p1:
                nump1 += 1
            elif delta == p2:
                nump2 += 1
            elif delta != nullrev:
                numother += 1

        # Obtain data on the raw chunks in the revlog.
        chunk = r._chunkraw(rev, rev)[1]
        if chunk:
            chunktype = chunk[0]
        else:
            chunktype = 'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    deltatotal = deltasize[2]
    if numrevs - numfull > 0:
        deltasize[2] /= numrevs - numfull
    totalsize = fulltotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.write(('format : %d\n') % format)
    ui.write(('flags  : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions     : ') + fmt2 % numrevs)
    ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions     : ') + fmt2 % numrevs)
    ui.write(('    full      : ') + fmt % pcfmt(numfull, numrevs))
    ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write(('    full      : ') + fmt % pcfmt(fulltotal, totalsize))
    ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        if chunktype == 'empty':
            return '    %s     : ' % chunktype
        elif chunktype in string.ascii_letters:
            return '    0x%s (%s)  : ' % (hex(chunktype), chunktype)
        else:
            return '    0x%s      : ' % hex(chunktype)

    ui.write('\n')
    ui.write(('chunks        : ') + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.write(('chunks size   : ') + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, compratio))
    ui.write(('avg chain length  : ') + fmt % avgchainlen)
    ui.write(('max chain length  : ') + fmt % maxchainlen)
    ui.write(('compression ratio : ') + fmt % compratio)

    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
                 % tuple(fullsize))
1802 ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
1803 ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
1803 % tuple(deltasize))
1804 % tuple(deltasize))
1804
1805
1805 if numdeltas > 0:
1806 if numdeltas > 0:
1806 ui.write('\n')
1807 ui.write('\n')
1807 fmt = pcfmtstr(numdeltas)
1808 fmt = pcfmtstr(numdeltas)
1808 fmt2 = pcfmtstr(numdeltas, 4)
1809 fmt2 = pcfmtstr(numdeltas, 4)
1809 ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
1810 ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
1810 if numprev > 0:
1811 if numprev > 0:
1811 ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
1812 ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
1812 numprev))
1813 numprev))
1813 ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
1814 ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
1814 numprev))
1815 numprev))
1815 ui.write((' other : ') + fmt2 % pcfmt(numoprev,
1816 ui.write((' other : ') + fmt2 % pcfmt(numoprev,
1816 numprev))
1817 numprev))
1817 if gdelta:
1818 if gdelta:
1818 ui.write(('deltas against p1 : ')
1819 ui.write(('deltas against p1 : ')
1819 + fmt % pcfmt(nump1, numdeltas))
1820 + fmt % pcfmt(nump1, numdeltas))
1820 ui.write(('deltas against p2 : ')
1821 ui.write(('deltas against p2 : ')
1821 + fmt % pcfmt(nump2, numdeltas))
1822 + fmt % pcfmt(nump2, numdeltas))
1822 ui.write(('deltas against other : ') + fmt % pcfmt(numother,
1823 ui.write(('deltas against other : ') + fmt % pcfmt(numother,
1823 numdeltas))
1824 numdeltas))
1824
1825
1825 @command('debugrevspec',
1826 @command('debugrevspec',
1826 [('', 'optimize', None,
1827 [('', 'optimize', None,
1827 _('print parsed tree after optimizing (DEPRECATED)')),
1828 _('print parsed tree after optimizing (DEPRECATED)')),
1828 ('p', 'show-stage', [],
1829 ('p', 'show-stage', [],
1829 _('print parsed tree at the given stage'), _('NAME')),
1830 _('print parsed tree at the given stage'), _('NAME')),
1830 ('', 'no-optimized', False, _('evaluate tree without optimization')),
1831 ('', 'no-optimized', False, _('evaluate tree without optimization')),
1831 ('', 'verify-optimized', False, _('verify optimized result')),
1832 ('', 'verify-optimized', False, _('verify optimized result')),
1832 ],
1833 ],
1833 ('REVSPEC'))
1834 ('REVSPEC'))
1834 def debugrevspec(ui, repo, expr, **opts):
1835 def debugrevspec(ui, repo, expr, **opts):
1835 """parse and apply a revision specification
1836 """parse and apply a revision specification
1836
1837
1837 Use -p/--show-stage option to print the parsed tree at the given stages.
1838 Use -p/--show-stage option to print the parsed tree at the given stages.
1838 Use -p all to print tree at every stage.
1839 Use -p all to print tree at every stage.
1839
1840
1840 Use --verify-optimized to compare the optimized result with the unoptimized
1841 Use --verify-optimized to compare the optimized result with the unoptimized
1841 one. Returns 1 if the optimized result differs.
1842 one. Returns 1 if the optimized result differs.
1842 """
1843 """
1843 stages = [
1844 stages = [
1844 ('parsed', lambda tree: tree),
1845 ('parsed', lambda tree: tree),
1845 ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)),
1846 ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)),
1846 ('concatenated', revsetlang.foldconcat),
1847 ('concatenated', revsetlang.foldconcat),
1847 ('analyzed', revsetlang.analyze),
1848 ('analyzed', revsetlang.analyze),
1848 ('optimized', revsetlang.optimize),
1849 ('optimized', revsetlang.optimize),
1849 ]
1850 ]
1850 if opts['no_optimized']:
1851 if opts['no_optimized']:
1851 stages = stages[:-1]
1852 stages = stages[:-1]
1852 if opts['verify_optimized'] and opts['no_optimized']:
1853 if opts['verify_optimized'] and opts['no_optimized']:
1853 raise error.Abort(_('cannot use --verify-optimized with '
1854 raise error.Abort(_('cannot use --verify-optimized with '
1854 '--no-optimized'))
1855 '--no-optimized'))
1855 stagenames = set(n for n, f in stages)
1856 stagenames = set(n for n, f in stages)
1856
1857
1857 showalways = set()
1858 showalways = set()
1858 showchanged = set()
1859 showchanged = set()
1859 if ui.verbose and not opts['show_stage']:
1860 if ui.verbose and not opts['show_stage']:
1860 # show parsed tree by --verbose (deprecated)
1861 # show parsed tree by --verbose (deprecated)
1861 showalways.add('parsed')
1862 showalways.add('parsed')
1862 showchanged.update(['expanded', 'concatenated'])
1863 showchanged.update(['expanded', 'concatenated'])
1863 if opts['optimize']:
1864 if opts['optimize']:
1864 showalways.add('optimized')
1865 showalways.add('optimized')
1865 if opts['show_stage'] and opts['optimize']:
1866 if opts['show_stage'] and opts['optimize']:
1866 raise error.Abort(_('cannot use --optimize with --show-stage'))
1867 raise error.Abort(_('cannot use --optimize with --show-stage'))
1867 if opts['show_stage'] == ['all']:
1868 if opts['show_stage'] == ['all']:
1868 showalways.update(stagenames)
1869 showalways.update(stagenames)
1869 else:
1870 else:
1870 for n in opts['show_stage']:
1871 for n in opts['show_stage']:
1871 if n not in stagenames:
1872 if n not in stagenames:
1872 raise error.Abort(_('invalid stage name: %s') % n)
1873 raise error.Abort(_('invalid stage name: %s') % n)
1873 showalways.update(opts['show_stage'])
1874 showalways.update(opts['show_stage'])
1874
1875
1875 treebystage = {}
1876 treebystage = {}
1876 printedtree = None
1877 printedtree = None
1877 tree = revsetlang.parse(expr, lookup=repo.__contains__)
1878 tree = revsetlang.parse(expr, lookup=repo.__contains__)
1878 for n, f in stages:
1879 for n, f in stages:
1879 treebystage[n] = tree = f(tree)
1880 treebystage[n] = tree = f(tree)
1880 if n in showalways or (n in showchanged and tree != printedtree):
1881 if n in showalways or (n in showchanged and tree != printedtree):
1881 if opts['show_stage'] or n != 'parsed':
1882 if opts['show_stage'] or n != 'parsed':
1882 ui.write(("* %s:\n") % n)
1883 ui.write(("* %s:\n") % n)
1883 ui.write(revsetlang.prettyformat(tree), "\n")
1884 ui.write(revsetlang.prettyformat(tree), "\n")
1884 printedtree = tree
1885 printedtree = tree
1885
1886
1886 if opts['verify_optimized']:
1887 if opts['verify_optimized']:
1887 arevs = revset.makematcher(treebystage['analyzed'])(repo)
1888 arevs = revset.makematcher(treebystage['analyzed'])(repo)
1888 brevs = revset.makematcher(treebystage['optimized'])(repo)
1889 brevs = revset.makematcher(treebystage['optimized'])(repo)
1889 if ui.verbose:
1890 if ui.verbose:
1890 ui.note(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
1891 ui.note(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
1891 ui.note(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
1892 ui.note(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
1892 arevs = list(arevs)
1893 arevs = list(arevs)
1893 brevs = list(brevs)
1894 brevs = list(brevs)
1894 if arevs == brevs:
1895 if arevs == brevs:
1895 return 0
1896 return 0
1896 ui.write(('--- analyzed\n'), label='diff.file_a')
1897 ui.write(('--- analyzed\n'), label='diff.file_a')
1897 ui.write(('+++ optimized\n'), label='diff.file_b')
1898 ui.write(('+++ optimized\n'), label='diff.file_b')
1898 sm = difflib.SequenceMatcher(None, arevs, brevs)
1899 sm = difflib.SequenceMatcher(None, arevs, brevs)
1899 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
1900 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
1900 if tag in ('delete', 'replace'):
1901 if tag in ('delete', 'replace'):
1901 for c in arevs[alo:ahi]:
1902 for c in arevs[alo:ahi]:
1902 ui.write('-%s\n' % c, label='diff.deleted')
1903 ui.write('-%s\n' % c, label='diff.deleted')
1903 if tag in ('insert', 'replace'):
1904 if tag in ('insert', 'replace'):
1904 for c in brevs[blo:bhi]:
1905 for c in brevs[blo:bhi]:
1905 ui.write('+%s\n' % c, label='diff.inserted')
1906 ui.write('+%s\n' % c, label='diff.inserted')
1906 if tag == 'equal':
1907 if tag == 'equal':
1907 for c in arevs[alo:ahi]:
1908 for c in arevs[alo:ahi]:
1908 ui.write(' %s\n' % c)
1909 ui.write(' %s\n' % c)
1909 return 1
1910 return 1
1910
1911
1911 func = revset.makematcher(tree)
1912 func = revset.makematcher(tree)
1912 revs = func(repo)
1913 revs = func(repo)
1913 if ui.verbose:
1914 if ui.verbose:
1914 ui.note(("* set:\n"), smartset.prettyformat(revs), "\n")
1915 ui.note(("* set:\n"), smartset.prettyformat(revs), "\n")
1915 for c in revs:
1916 for c in revs:
1916 ui.write("%s\n" % c)
1917 ui.write("%s\n" % c)
1917
1918
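As a hedged aside for readers tracing the stage pipeline above: this minimal sketch drives the same flow from Python, assuming `ui` and `repo` objects are already in hand. The helper name `evalrevspec` is invented for illustration; the calls mirror the stages list in debugrevspec.

from mercurial import revset, revsetlang

def evalrevspec(ui, repo, expr):
    # same order as the 'stages' list: parsed -> expanded ->
    # concatenated -> analyzed -> optimized
    tree = revsetlang.parse(expr, lookup=repo.__contains__)
    tree = revsetlang.expandaliases(ui, tree)
    tree = revsetlang.foldconcat(tree)
    tree = revsetlang.analyze(tree)
    tree = revsetlang.optimize(tree)
    # build a matcher from the final tree and evaluate it
    return list(revset.makematcher(tree)(repo))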
1918 @command('debugsetparents', [], _('REV1 [REV2]'))
1919 @command('debugsetparents', [], _('REV1 [REV2]'))
1919 def debugsetparents(ui, repo, rev1, rev2=None):
1920 def debugsetparents(ui, repo, rev1, rev2=None):
1920 """manually set the parents of the current working directory
1921 """manually set the parents of the current working directory
1921
1922
1922 This is useful for writing repository conversion tools, but should
1923 This is useful for writing repository conversion tools, but should
1923 be used with care. For example, neither the working directory nor the
1924 be used with care. For example, neither the working directory nor the
1924 dirstate is updated, so file status may be incorrect after running this
1925 dirstate is updated, so file status may be incorrect after running this
1925 command.
1926 command.
1926
1927
1927 Returns 0 on success.
1928 Returns 0 on success.
1928 """
1929 """
1929
1930
1930 r1 = scmutil.revsingle(repo, rev1).node()
1931 r1 = scmutil.revsingle(repo, rev1).node()
1931 r2 = scmutil.revsingle(repo, rev2, 'null').node()
1932 r2 = scmutil.revsingle(repo, rev2, 'null').node()
1932
1933
1933 with repo.wlock():
1934 with repo.wlock():
1934 repo.setparents(r1, r2)
1935 repo.setparents(r1, r2)
1935
1936
1936 @command('debugsub',
1937 @command('debugsub',
1937 [('r', 'rev', '',
1938 [('r', 'rev', '',
1938 _('revision to check'), _('REV'))],
1939 _('revision to check'), _('REV'))],
1939 _('[-r REV] [REV]'))
1940 _('[-r REV] [REV]'))
1940 def debugsub(ui, repo, rev=None):
1941 def debugsub(ui, repo, rev=None):
1941 ctx = scmutil.revsingle(repo, rev, None)
1942 ctx = scmutil.revsingle(repo, rev, None)
1942 for k, v in sorted(ctx.substate.items()):
1943 for k, v in sorted(ctx.substate.items()):
1943 ui.write(('path %s\n') % k)
1944 ui.write(('path %s\n') % k)
1944 ui.write((' source %s\n') % v[0])
1945 ui.write((' source %s\n') % v[0])
1945 ui.write((' revision %s\n') % v[1])
1946 ui.write((' revision %s\n') % v[1])
1946
1947
1947 @command('debugsuccessorssets',
1948 @command('debugsuccessorssets',
1948 [],
1949 [],
1949 _('[REV]'))
1950 _('[REV]'))
1950 def debugsuccessorssets(ui, repo, *revs):
1951 def debugsuccessorssets(ui, repo, *revs):
1951 """show set of successors for revision
1952 """show set of successors for revision
1952
1953
1953 A successors set of changeset A is a consistent group of revisions that
1954 A successors set of changeset A is a consistent group of revisions that
1954 succeed A. It contains non-obsolete changesets only.
1955 succeed A. It contains non-obsolete changesets only.
1955
1956
1956 In most cases a changeset A has a single successors set containing a single
1957 In most cases a changeset A has a single successors set containing a single
1957 successor (changeset A replaced by A').
1958 successor (changeset A replaced by A').
1958
1959
1959 A changeset that is made obsolete with no successors is called "pruned".
1960 A changeset that is made obsolete with no successors is called "pruned".
1960 Such changesets have no successors sets at all.
1961 Such changesets have no successors sets at all.
1961
1962
1962 A changeset that has been "split" will have a successors set containing
1963 A changeset that has been "split" will have a successors set containing
1963 more than one successor.
1964 more than one successor.
1964
1965
1965 A changeset that has been rewritten in multiple different ways is called
1966 A changeset that has been rewritten in multiple different ways is called
1966 "divergent". Such changesets have multiple successor sets (each of which
1967 "divergent". Such changesets have multiple successor sets (each of which
1967 may also be split, i.e. have multiple successors).
1968 may also be split, i.e. have multiple successors).
1968
1969
1969 Results are displayed as follows::
1970 Results are displayed as follows::
1970
1971
1971 <rev1>
1972 <rev1>
1972 <successors-1A>
1973 <successors-1A>
1973 <rev2>
1974 <rev2>
1974 <successors-2A>
1975 <successors-2A>
1975 <successors-2B1> <successors-2B2> <successors-2B3>
1976 <successors-2B1> <successors-2B2> <successors-2B3>
1976
1977
1977 Here rev2 has two possible (i.e. divergent) successors sets. The first
1978 Here rev2 has two possible (i.e. divergent) successors sets. The first
1978 holds one element, whereas the second holds three (i.e. the changeset has
1979 holds one element, whereas the second holds three (i.e. the changeset has
1979 been split).
1980 been split).
1980 """
1981 """
1981 # passed to successorssets caching computation from one call to another
1982 # passed to successorssets caching computation from one call to another
1982 cache = {}
1983 cache = {}
1983 ctx2str = str
1984 ctx2str = str
1984 node2str = short
1985 node2str = short
1985 if ui.debug():
1986 if ui.debug():
1986 def ctx2str(ctx):
1987 def ctx2str(ctx):
1987 return ctx.hex()
1988 return ctx.hex()
1988 node2str = hex
1989 node2str = hex
1989 for rev in scmutil.revrange(repo, revs):
1990 for rev in scmutil.revrange(repo, revs):
1990 ctx = repo[rev]
1991 ctx = repo[rev]
1991 ui.write('%s\n'% ctx2str(ctx))
1992 ui.write('%s\n'% ctx2str(ctx))
1992 for succsset in obsolete.successorssets(repo, ctx.node(), cache):
1993 for succsset in obsolete.successorssets(repo, ctx.node(), cache):
1993 if succsset:
1994 if succsset:
1994 ui.write(' ')
1995 ui.write(' ')
1995 ui.write(node2str(succsset[0]))
1996 ui.write(node2str(succsset[0]))
1996 for node in succsset[1:]:
1997 for node in succsset[1:]:
1997 ui.write(' ')
1998 ui.write(' ')
1998 ui.write(node2str(node))
1999 ui.write(node2str(node))
1999 ui.write('\n')
2000 ui.write('\n')
2000
2001
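A hedged sketch of the API this command wraps, assuming an obsolescence-enabled `repo`; the helper name `shortsuccessors` is invented for illustration.

from mercurial import obsolete
from mercurial.node import short

def shortsuccessors(repo, rev):
    # cache is shared across calls to amortize the computation,
    # exactly as in the command above
    cache = {}
    ctx = repo[rev]
    # yields one list per successors set: none for a pruned
    # changeset, several for a divergent one
    for succsset in obsolete.successorssets(repo, ctx.node(), cache):
        yield [short(node) for node in succsset]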
2001 @command('debugtemplate',
2002 @command('debugtemplate',
2002 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2003 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2003 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2004 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2004 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2005 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2005 optionalrepo=True)
2006 optionalrepo=True)
2006 def debugtemplate(ui, repo, tmpl, **opts):
2007 def debugtemplate(ui, repo, tmpl, **opts):
2007 """parse and apply a template
2008 """parse and apply a template
2008
2009
2009 If -r/--rev is given, the template is processed as a log template and
2010 If -r/--rev is given, the template is processed as a log template and
2010 applied to the given changesets. Otherwise, it is processed as a generic
2011 applied to the given changesets. Otherwise, it is processed as a generic
2011 template.
2012 template.
2012
2013
2013 Use --verbose to print the parsed tree.
2014 Use --verbose to print the parsed tree.
2014 """
2015 """
2015 revs = None
2016 revs = None
2016 if opts['rev']:
2017 if opts['rev']:
2017 if repo is None:
2018 if repo is None:
2018 raise error.RepoError(_('there is no Mercurial repository here '
2019 raise error.RepoError(_('there is no Mercurial repository here '
2019 '(.hg not found)'))
2020 '(.hg not found)'))
2020 revs = scmutil.revrange(repo, opts['rev'])
2021 revs = scmutil.revrange(repo, opts['rev'])
2021
2022
2022 props = {}
2023 props = {}
2023 for d in opts['define']:
2024 for d in opts['define']:
2024 try:
2025 try:
2025 k, v = (e.strip() for e in d.split('=', 1))
2026 k, v = (e.strip() for e in d.split('=', 1))
2026 if not k or k == 'ui':
2027 if not k or k == 'ui':
2027 raise ValueError
2028 raise ValueError
2028 props[k] = v
2029 props[k] = v
2029 except ValueError:
2030 except ValueError:
2030 raise error.Abort(_('malformed keyword definition: %s') % d)
2031 raise error.Abort(_('malformed keyword definition: %s') % d)
2031
2032
2032 if ui.verbose:
2033 if ui.verbose:
2033 aliases = ui.configitems('templatealias')
2034 aliases = ui.configitems('templatealias')
2034 tree = templater.parse(tmpl)
2035 tree = templater.parse(tmpl)
2035 ui.note(templater.prettyformat(tree), '\n')
2036 ui.note(templater.prettyformat(tree), '\n')
2036 newtree = templater.expandaliases(tree, aliases)
2037 newtree = templater.expandaliases(tree, aliases)
2037 if newtree != tree:
2038 if newtree != tree:
2038 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2039 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2039
2040
2040 mapfile = None
2041 mapfile = None
2041 if revs is None:
2042 if revs is None:
2042 k = 'debugtemplate'
2043 k = 'debugtemplate'
2043 t = formatter.maketemplater(ui, k, tmpl)
2044 t = formatter.maketemplater(ui, k, tmpl)
2044 ui.write(templater.stringify(t(k, ui=ui, **props)))
2045 ui.write(templater.stringify(t(k, ui=ui, **props)))
2045 else:
2046 else:
2046 displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl,
2047 displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl,
2047 mapfile, buffered=False)
2048 mapfile, buffered=False)
2048 for r in revs:
2049 for r in revs:
2049 displayer.show(repo[r], **props)
2050 displayer.show(repo[r], **props)
2050 displayer.close()
2051 displayer.close()
2051
2052
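As a hedged illustration of the no-revision branch above, this sketch renders a generic template string programmatically; `rendertemplate` is an invented name and `props` stands in for -D KEY=VALUE definitions.

from mercurial import formatter, templater

def rendertemplate(ui, tmpl, **props):
    # mirrors the branch taken when no -r/--rev is given: build a
    # one-off templater under an arbitrary key and stringify it
    k = 'debugtemplate'
    t = formatter.maketemplater(ui, k, tmpl)
    return templater.stringify(t(k, ui=ui, **props))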
2052 @command('debugupgraderepo', [
2053 @command('debugupgraderepo', [
2053 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2054 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2054 ('', 'run', False, _('performs an upgrade')),
2055 ('', 'run', False, _('performs an upgrade')),
2055 ])
2056 ])
2056 def debugupgraderepo(ui, repo, run=False, optimize=None):
2057 def debugupgraderepo(ui, repo, run=False, optimize=None):
2057 """upgrade a repository to use different features
2058 """upgrade a repository to use different features
2058
2059
2059 If no arguments are specified, the repository is evaluated for upgrade
2060 If no arguments are specified, the repository is evaluated for upgrade
2060 and a list of problems and potential optimizations is printed.
2061 and a list of problems and potential optimizations is printed.
2061
2062
2062 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2063 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2063 can be influenced via additional arguments. More details will be provided
2064 can be influenced via additional arguments. More details will be provided
2064 by the command output when run without ``--run``.
2065 by the command output when run without ``--run``.
2065
2066
2066 During the upgrade, the repository will be locked and no writes will be
2067 During the upgrade, the repository will be locked and no writes will be
2067 allowed.
2068 allowed.
2068
2069
2069 At the end of the upgrade, the repository may not be readable while new
2070 At the end of the upgrade, the repository may not be readable while new
2070 repository data is swapped in. This window will be as long as it takes to
2071 repository data is swapped in. This window will be as long as it takes to
2071 rename some directories inside the ``.hg`` directory. On most machines, this
2072 rename some directories inside the ``.hg`` directory. On most machines, this
2072 should complete almost instantaneously and the chances of a consumer being
2073 should complete almost instantaneously and the chances of a consumer being
2073 unable to access the repository should be low.
2074 unable to access the repository should be low.
2074 """
2075 """
2075 return repair.upgraderepo(ui, repo, run=run, optimize=optimize)
2076 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2076
2077
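Since the hunk above is the user-visible tie-in for the module extraction, here is a brief, hedged sketch of calling the relocated entry point directly; it assumes `ui` and `repo` objects and simply forwards to the function named above.

from mercurial import upgrade

def reportupgrade(ui, repo):
    # with run=False this only evaluates the repository and prints
    # deficiencies and optimizations, like the command without --run
    return upgrade.upgraderepo(ui, repo, run=False, optimize=None)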
2077 @command('debugwalk', commands.walkopts, _('[OPTION]... [FILE]...'),
2078 @command('debugwalk', commands.walkopts, _('[OPTION]... [FILE]...'),
2078 inferrepo=True)
2079 inferrepo=True)
2079 def debugwalk(ui, repo, *pats, **opts):
2080 def debugwalk(ui, repo, *pats, **opts):
2080 """show how files match on given patterns"""
2081 """show how files match on given patterns"""
2081 m = scmutil.match(repo[None], pats, opts)
2082 m = scmutil.match(repo[None], pats, opts)
2082 items = list(repo.walk(m))
2083 items = list(repo.walk(m))
2083 if not items:
2084 if not items:
2084 return
2085 return
2085 f = lambda fn: fn
2086 f = lambda fn: fn
2086 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2087 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2087 f = lambda fn: util.normpath(fn)
2088 f = lambda fn: util.normpath(fn)
2088 fmt = 'f %%-%ds %%-%ds %%s' % (
2089 fmt = 'f %%-%ds %%-%ds %%s' % (
2089 max([len(abs) for abs in items]),
2090 max([len(abs) for abs in items]),
2090 max([len(m.rel(abs)) for abs in items]))
2091 max([len(m.rel(abs)) for abs in items]))
2091 for abs in items:
2092 for abs in items:
2092 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2093 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2093 ui.write("%s\n" % line.rstrip())
2094 ui.write("%s\n" % line.rstrip())
2094
2095
2095 @command('debugwireargs',
2096 @command('debugwireargs',
2096 [('', 'three', '', 'three'),
2097 [('', 'three', '', 'three'),
2097 ('', 'four', '', 'four'),
2098 ('', 'four', '', 'four'),
2098 ('', 'five', '', 'five'),
2099 ('', 'five', '', 'five'),
2099 ] + commands.remoteopts,
2100 ] + commands.remoteopts,
2100 _('REPO [OPTIONS]... [ONE [TWO]]'),
2101 _('REPO [OPTIONS]... [ONE [TWO]]'),
2101 norepo=True)
2102 norepo=True)
2102 def debugwireargs(ui, repopath, *vals, **opts):
2103 def debugwireargs(ui, repopath, *vals, **opts):
2103 repo = hg.peer(ui, opts, repopath)
2104 repo = hg.peer(ui, opts, repopath)
2104 for opt in commands.remoteopts:
2105 for opt in commands.remoteopts:
2105 del opts[opt[1]]
2106 del opts[opt[1]]
2106 args = {}
2107 args = {}
2107 for k, v in opts.iteritems():
2108 for k, v in opts.iteritems():
2108 if v:
2109 if v:
2109 args[k] = v
2110 args[k] = v
2110 # run twice to check that we don't mess up the stream for the next command
2111 # run twice to check that we don't mess up the stream for the next command
2111 res1 = repo.debugwireargs(*vals, **args)
2112 res1 = repo.debugwireargs(*vals, **args)
2112 res2 = repo.debugwireargs(*vals, **args)
2113 res2 = repo.debugwireargs(*vals, **args)
2113 ui.write("%s\n" % res1)
2114 ui.write("%s\n" % res1)
2114 if res1 != res2:
2115 if res1 != res2:
2115 ui.warn("%s\n" % res2)
2116 ui.warn("%s\n" % res2)
This diff has been collapsed as it changes many lines (742 lines changed).
@@ -1,1096 +1,354
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import stat
14 import tempfile
15
13
16 from .i18n import _
14 from .i18n import _
17 from .node import short
15 from .node import short
18 from . import (
16 from . import (
19 bundle2,
17 bundle2,
20 changegroup,
18 changegroup,
21 changelog,
22 error,
19 error,
23 exchange,
20 exchange,
24 manifest,
25 obsolete,
21 obsolete,
26 revlog,
27 scmutil,
28 util,
22 util,
29 vfs as vfsmod,
30 )
23 )
31
24
32 def _bundle(repo, bases, heads, node, suffix, compress=True):
25 def _bundle(repo, bases, heads, node, suffix, compress=True):
33 """create a bundle with the specified revisions as a backup"""
26 """create a bundle with the specified revisions as a backup"""
34 cgversion = changegroup.safeversion(repo)
27 cgversion = changegroup.safeversion(repo)
35
28
36 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
29 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
37 version=cgversion)
30 version=cgversion)
38 backupdir = "strip-backup"
31 backupdir = "strip-backup"
39 vfs = repo.vfs
32 vfs = repo.vfs
40 if not vfs.isdir(backupdir):
33 if not vfs.isdir(backupdir):
41 vfs.mkdir(backupdir)
34 vfs.mkdir(backupdir)
42
35
43 # Include a hash of all the nodes in the filename for uniqueness
36 # Include a hash of all the nodes in the filename for uniqueness
44 allcommits = repo.set('%ln::%ln', bases, heads)
37 allcommits = repo.set('%ln::%ln', bases, heads)
45 allhashes = sorted(c.hex() for c in allcommits)
38 allhashes = sorted(c.hex() for c in allcommits)
46 totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
39 totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
47 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
40 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
48
41
49 comp = None
42 comp = None
50 if cgversion != '01':
43 if cgversion != '01':
51 bundletype = "HG20"
44 bundletype = "HG20"
52 if compress:
45 if compress:
53 comp = 'BZ'
46 comp = 'BZ'
54 elif compress:
47 elif compress:
55 bundletype = "HG10BZ"
48 bundletype = "HG10BZ"
56 else:
49 else:
57 bundletype = "HG10UN"
50 bundletype = "HG10UN"
58 return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
51 return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
59 compression=comp)
52 compression=comp)
60
53
61 def _collectfiles(repo, striprev):
54 def _collectfiles(repo, striprev):
62 """find out the filelogs affected by the strip"""
55 """find out the filelogs affected by the strip"""
63 files = set()
56 files = set()
64
57
65 for x in xrange(striprev, len(repo)):
58 for x in xrange(striprev, len(repo)):
66 files.update(repo[x].files())
59 files.update(repo[x].files())
67
60
68 return sorted(files)
61 return sorted(files)
69
62
70 def _collectbrokencsets(repo, files, striprev):
63 def _collectbrokencsets(repo, files, striprev):
71 """return the changesets which will be broken by the truncation"""
64 """return the changesets which will be broken by the truncation"""
72 s = set()
65 s = set()
73 def collectone(revlog):
66 def collectone(revlog):
74 _, brokenset = revlog.getstrippoint(striprev)
67 _, brokenset = revlog.getstrippoint(striprev)
75 s.update([revlog.linkrev(r) for r in brokenset])
68 s.update([revlog.linkrev(r) for r in brokenset])
76
69
77 collectone(repo.manifestlog._revlog)
70 collectone(repo.manifestlog._revlog)
78 for fname in files:
71 for fname in files:
79 collectone(repo.file(fname))
72 collectone(repo.file(fname))
80
73
81 return s
74 return s
82
75
83 def strip(ui, repo, nodelist, backup=True, topic='backup'):
76 def strip(ui, repo, nodelist, backup=True, topic='backup'):
84 # This function operates within a transaction of its own, but does
77 # This function operates within a transaction of its own, but does
85 # not take any lock on the repo.
78 # not take any lock on the repo.
86 # Simple way to maintain backwards compatibility for this
79 # Simple way to maintain backwards compatibility for this
87 # argument.
80 # argument.
88 if backup in ['none', 'strip']:
81 if backup in ['none', 'strip']:
89 backup = False
82 backup = False
90
83
91 repo = repo.unfiltered()
84 repo = repo.unfiltered()
92 repo.destroying()
85 repo.destroying()
93
86
94 cl = repo.changelog
87 cl = repo.changelog
95 # TODO handle undo of merge sets
88 # TODO handle undo of merge sets
96 if isinstance(nodelist, str):
89 if isinstance(nodelist, str):
97 nodelist = [nodelist]
90 nodelist = [nodelist]
98 striplist = [cl.rev(node) for node in nodelist]
91 striplist = [cl.rev(node) for node in nodelist]
99 striprev = min(striplist)
92 striprev = min(striplist)
100
93
101 files = _collectfiles(repo, striprev)
94 files = _collectfiles(repo, striprev)
102 saverevs = _collectbrokencsets(repo, files, striprev)
95 saverevs = _collectbrokencsets(repo, files, striprev)
103
96
104 # Some revisions with rev > striprev may not be descendants of striprev.
97 # Some revisions with rev > striprev may not be descendants of striprev.
105 # We have to find these revisions and put them in a bundle, so that
98 # We have to find these revisions and put them in a bundle, so that
106 # we can restore them after the truncations.
99 # we can restore them after the truncations.
107 # To create the bundle we use repo.changegroupsubset which requires
100 # To create the bundle we use repo.changegroupsubset which requires
108 # the list of heads and bases of the set of interesting revisions.
101 # the list of heads and bases of the set of interesting revisions.
109 # (head = revision in the set that has no descendant in the set;
102 # (head = revision in the set that has no descendant in the set;
110 # base = revision in the set that has no ancestor in the set)
103 # base = revision in the set that has no ancestor in the set)
111 tostrip = set(striplist)
104 tostrip = set(striplist)
112 saveheads = set(saverevs)
105 saveheads = set(saverevs)
113 for r in cl.revs(start=striprev + 1):
106 for r in cl.revs(start=striprev + 1):
114 if any(p in tostrip for p in cl.parentrevs(r)):
107 if any(p in tostrip for p in cl.parentrevs(r)):
115 tostrip.add(r)
108 tostrip.add(r)
116
109
117 if r not in tostrip:
110 if r not in tostrip:
118 saverevs.add(r)
111 saverevs.add(r)
119 saveheads.difference_update(cl.parentrevs(r))
112 saveheads.difference_update(cl.parentrevs(r))
120 saveheads.add(r)
113 saveheads.add(r)
121 saveheads = [cl.node(r) for r in saveheads]
114 saveheads = [cl.node(r) for r in saveheads]
122
115
123 # compute base nodes
116 # compute base nodes
124 if saverevs:
117 if saverevs:
125 descendants = set(cl.descendants(saverevs))
118 descendants = set(cl.descendants(saverevs))
126 saverevs.difference_update(descendants)
119 saverevs.difference_update(descendants)
127 savebases = [cl.node(r) for r in saverevs]
120 savebases = [cl.node(r) for r in saverevs]
128 stripbases = [cl.node(r) for r in tostrip]
121 stripbases = [cl.node(r) for r in tostrip]
129
122
130 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
123 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
131 # is much faster
124 # is much faster
132 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
125 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
133 if newbmtarget:
126 if newbmtarget:
134 newbmtarget = repo[newbmtarget.first()].node()
127 newbmtarget = repo[newbmtarget.first()].node()
135 else:
128 else:
136 newbmtarget = '.'
129 newbmtarget = '.'
137
130
138 bm = repo._bookmarks
131 bm = repo._bookmarks
139 updatebm = []
132 updatebm = []
140 for m in bm:
133 for m in bm:
141 rev = repo[bm[m]].rev()
134 rev = repo[bm[m]].rev()
142 if rev in tostrip:
135 if rev in tostrip:
143 updatebm.append(m)
136 updatebm.append(m)
144
137
145 # create a changegroup for all the branches we need to keep
138 # create a changegroup for all the branches we need to keep
146 backupfile = None
139 backupfile = None
147 vfs = repo.vfs
140 vfs = repo.vfs
148 node = nodelist[-1]
141 node = nodelist[-1]
149 if backup:
142 if backup:
150 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
143 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
151 repo.ui.status(_("saved backup bundle to %s\n") %
144 repo.ui.status(_("saved backup bundle to %s\n") %
152 vfs.join(backupfile))
145 vfs.join(backupfile))
153 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
146 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
154 vfs.join(backupfile))
147 vfs.join(backupfile))
155 tmpbundlefile = None
148 tmpbundlefile = None
156 if saveheads:
149 if saveheads:
157 # do not compress temporary bundle if we remove it from disk later
150 # do not compress temporary bundle if we remove it from disk later
158 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
151 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
159 compress=False)
152 compress=False)
160
153
161 mfst = repo.manifestlog._revlog
154 mfst = repo.manifestlog._revlog
162
155
163 curtr = repo.currenttransaction()
156 curtr = repo.currenttransaction()
164 if curtr is not None:
157 if curtr is not None:
165 del curtr # avoid carrying reference to transaction for nothing
158 del curtr # avoid carrying reference to transaction for nothing
166 raise error.ProgrammingError('cannot strip from inside a transaction')
159 raise error.ProgrammingError('cannot strip from inside a transaction')
167
160
168 try:
161 try:
169 with repo.transaction("strip") as tr:
162 with repo.transaction("strip") as tr:
170 offset = len(tr.entries)
163 offset = len(tr.entries)
171
164
172 tr.startgroup()
165 tr.startgroup()
173 cl.strip(striprev, tr)
166 cl.strip(striprev, tr)
174 mfst.strip(striprev, tr)
167 mfst.strip(striprev, tr)
175 if 'treemanifest' in repo.requirements: # safe but unnecessary
168 if 'treemanifest' in repo.requirements: # safe but unnecessary
176 # otherwise
169 # otherwise
177 for unencoded, encoded, size in repo.store.datafiles():
170 for unencoded, encoded, size in repo.store.datafiles():
178 if (unencoded.startswith('meta/') and
171 if (unencoded.startswith('meta/') and
179 unencoded.endswith('00manifest.i')):
172 unencoded.endswith('00manifest.i')):
180 dir = unencoded[5:-12]
173 dir = unencoded[5:-12]
181 repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
174 repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
182 for fn in files:
175 for fn in files:
183 repo.file(fn).strip(striprev, tr)
176 repo.file(fn).strip(striprev, tr)
184 tr.endgroup()
177 tr.endgroup()
185
178
186 for i in xrange(offset, len(tr.entries)):
179 for i in xrange(offset, len(tr.entries)):
187 file, troffset, ignore = tr.entries[i]
180 file, troffset, ignore = tr.entries[i]
188 with repo.svfs(file, 'a', checkambig=True) as fp:
181 with repo.svfs(file, 'a', checkambig=True) as fp:
189 fp.truncate(troffset)
182 fp.truncate(troffset)
190 if troffset == 0:
183 if troffset == 0:
191 repo.store.markremoved(file)
184 repo.store.markremoved(file)
192
185
193 if tmpbundlefile:
186 if tmpbundlefile:
194 ui.note(_("adding branch\n"))
187 ui.note(_("adding branch\n"))
195 f = vfs.open(tmpbundlefile, "rb")
188 f = vfs.open(tmpbundlefile, "rb")
196 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
189 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
197 if not repo.ui.verbose:
190 if not repo.ui.verbose:
198 # silence internal shuffling chatter
191 # silence internal shuffling chatter
199 repo.ui.pushbuffer()
192 repo.ui.pushbuffer()
200 if isinstance(gen, bundle2.unbundle20):
193 if isinstance(gen, bundle2.unbundle20):
201 with repo.transaction('strip') as tr:
194 with repo.transaction('strip') as tr:
202 tr.hookargs = {'source': 'strip',
195 tr.hookargs = {'source': 'strip',
203 'url': 'bundle:' + vfs.join(tmpbundlefile)}
196 'url': 'bundle:' + vfs.join(tmpbundlefile)}
204 bundle2.applybundle(repo, gen, tr, source='strip',
197 bundle2.applybundle(repo, gen, tr, source='strip',
205 url='bundle:' + vfs.join(tmpbundlefile))
198 url='bundle:' + vfs.join(tmpbundlefile))
206 else:
199 else:
207 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
200 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
208 True)
201 True)
209 if not repo.ui.verbose:
202 if not repo.ui.verbose:
210 repo.ui.popbuffer()
203 repo.ui.popbuffer()
211 f.close()
204 f.close()
212 repo._phasecache.invalidate()
205 repo._phasecache.invalidate()
213
206
214 for m in updatebm:
207 for m in updatebm:
215 bm[m] = repo[newbmtarget].node()
208 bm[m] = repo[newbmtarget].node()
216
209
217 with repo.lock():
210 with repo.lock():
218 with repo.transaction('repair') as tr:
211 with repo.transaction('repair') as tr:
219 bm.recordchange(tr)
212 bm.recordchange(tr)
220
213
221 # remove undo files
214 # remove undo files
222 for undovfs, undofile in repo.undofiles():
215 for undovfs, undofile in repo.undofiles():
223 try:
216 try:
224 undovfs.unlink(undofile)
217 undovfs.unlink(undofile)
225 except OSError as e:
218 except OSError as e:
226 if e.errno != errno.ENOENT:
219 if e.errno != errno.ENOENT:
227 ui.warn(_('error removing %s: %s\n') %
220 ui.warn(_('error removing %s: %s\n') %
228 (undovfs.join(undofile), str(e)))
221 (undovfs.join(undofile), str(e)))
229
222
230 except: # re-raises
223 except: # re-raises
231 if backupfile:
224 if backupfile:
232 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
225 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
233 % vfs.join(backupfile))
226 % vfs.join(backupfile))
234 if tmpbundlefile:
227 if tmpbundlefile:
235 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
228 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
236 % vfs.join(tmpbundlefile))
229 % vfs.join(tmpbundlefile))
237 ui.warn(_("(fix the problem, then recover the changesets with "
230 ui.warn(_("(fix the problem, then recover the changesets with "
238 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
231 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
239 raise
232 raise
240 else:
233 else:
241 if tmpbundlefile:
234 if tmpbundlefile:
242 # Remove temporary bundle only if there were no exceptions
235 # Remove temporary bundle only if there were no exceptions
243 vfs.unlink(tmpbundlefile)
236 vfs.unlink(tmpbundlefile)
244
237
245 repo.destroyed()
238 repo.destroyed()
246 # return the backup file path (or None if 'backup' was False) so
239 # return the backup file path (or None if 'backup' was False) so
247 # extensions can use it
240 # extensions can use it
248 return backupfile
241 return backupfile
249
242
250 def rebuildfncache(ui, repo):
243 def rebuildfncache(ui, repo):
251 """Rebuilds the fncache file from repo history.
244 """Rebuilds the fncache file from repo history.
252
245
253 Missing entries will be added. Extra entries will be removed.
246 Missing entries will be added. Extra entries will be removed.
254 """
247 """
255 repo = repo.unfiltered()
248 repo = repo.unfiltered()
256
249
257 if 'fncache' not in repo.requirements:
250 if 'fncache' not in repo.requirements:
258 ui.warn(_('(not rebuilding fncache because repository does not '
251 ui.warn(_('(not rebuilding fncache because repository does not '
259 'support fncache)\n'))
252 'support fncache)\n'))
260 return
253 return
261
254
262 with repo.lock():
255 with repo.lock():
263 fnc = repo.store.fncache
256 fnc = repo.store.fncache
264 # Trigger load of fncache.
257 # Trigger load of fncache.
265 if 'irrelevant' in fnc:
258 if 'irrelevant' in fnc:
266 pass
259 pass
267
260
268 oldentries = set(fnc.entries)
261 oldentries = set(fnc.entries)
269 newentries = set()
262 newentries = set()
270 seenfiles = set()
263 seenfiles = set()
271
264
272 repolen = len(repo)
265 repolen = len(repo)
273 for rev in repo:
266 for rev in repo:
274 ui.progress(_('rebuilding'), rev, total=repolen,
267 ui.progress(_('rebuilding'), rev, total=repolen,
275 unit=_('changesets'))
268 unit=_('changesets'))
276
269
277 ctx = repo[rev]
270 ctx = repo[rev]
278 for f in ctx.files():
271 for f in ctx.files():
279 # This is to minimize I/O.
272 # This is to minimize I/O.
280 if f in seenfiles:
273 if f in seenfiles:
281 continue
274 continue
282 seenfiles.add(f)
275 seenfiles.add(f)
283
276
284 i = 'data/%s.i' % f
277 i = 'data/%s.i' % f
285 d = 'data/%s.d' % f
278 d = 'data/%s.d' % f
286
279
287 if repo.store._exists(i):
280 if repo.store._exists(i):
288 newentries.add(i)
281 newentries.add(i)
289 if repo.store._exists(d):
282 if repo.store._exists(d):
290 newentries.add(d)
283 newentries.add(d)
291
284
292 ui.progress(_('rebuilding'), None)
285 ui.progress(_('rebuilding'), None)
293
286
294 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
287 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
295 for dir in util.dirs(seenfiles):
288 for dir in util.dirs(seenfiles):
296 i = 'meta/%s/00manifest.i' % dir
289 i = 'meta/%s/00manifest.i' % dir
297 d = 'meta/%s/00manifest.d' % dir
290 d = 'meta/%s/00manifest.d' % dir
298
291
299 if repo.store._exists(i):
292 if repo.store._exists(i):
300 newentries.add(i)
293 newentries.add(i)
301 if repo.store._exists(d):
294 if repo.store._exists(d):
302 newentries.add(d)
295 newentries.add(d)
303
296
304 addcount = len(newentries - oldentries)
297 addcount = len(newentries - oldentries)
305 removecount = len(oldentries - newentries)
298 removecount = len(oldentries - newentries)
306 for p in sorted(oldentries - newentries):
299 for p in sorted(oldentries - newentries):
307 ui.write(_('removing %s\n') % p)
300 ui.write(_('removing %s\n') % p)
308 for p in sorted(newentries - oldentries):
301 for p in sorted(newentries - oldentries):
309 ui.write(_('adding %s\n') % p)
302 ui.write(_('adding %s\n') % p)
310
303
311 if addcount or removecount:
304 if addcount or removecount:
312 ui.write(_('%d items added, %d removed from fncache\n') %
305 ui.write(_('%d items added, %d removed from fncache\n') %
313 (addcount, removecount))
306 (addcount, removecount))
314 fnc.entries = newentries
307 fnc.entries = newentries
315 fnc._dirty = True
308 fnc._dirty = True
316
309
317 with repo.transaction('fncache') as tr:
310 with repo.transaction('fncache') as tr:
318 fnc.write(tr)
311 fnc.write(tr)
319 else:
312 else:
320 ui.write(_('fncache already up to date\n'))
313 ui.write(_('fncache already up to date\n'))
321
314
322 def stripbmrevset(repo, mark):
315 def stripbmrevset(repo, mark):
323 """
316 """
324 The revset to strip when strip is called with -B mark
317 The revset to strip when strip is called with -B mark
325
318
326 Needs to live here so extensions can use it and wrap it even when strip is
319 Needs to live here so extensions can use it and wrap it even when strip is
327 not enabled or not present on a box.
320 not enabled or not present on a box.
328 """
321 """
329 return repo.revs("ancestors(bookmark(%s)) - "
322 return repo.revs("ancestors(bookmark(%s)) - "
330 "ancestors(head() and not bookmark(%s)) - "
323 "ancestors(head() and not bookmark(%s)) - "
331 "ancestors(bookmark() and not bookmark(%s))",
324 "ancestors(bookmark() and not bookmark(%s))",
332 mark, mark, mark)
325 mark, mark, mark)
333
326
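A small, hedged usage sketch of the revset helper above; the function name and bookmark are illustrative only.

from mercurial import repair

def bookmarkstriptargets(repo, mark):
    # nodes that `hg strip -B <mark>` would remove, per the revset above
    revs = repair.stripbmrevset(repo, mark)
    return [repo.changelog.node(r) for r in revs]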
334 def deleteobsmarkers(obsstore, indices):
327 def deleteobsmarkers(obsstore, indices):
335 """Delete some obsmarkers from obsstore and return how many were deleted
328 """Delete some obsmarkers from obsstore and return how many were deleted
336
329
337 'indices' is a list of ints which are the indices
330 'indices' is a list of ints which are the indices
338 of the markers to be deleted.
331 of the markers to be deleted.
339
332
340 Every invocation of this function completely rewrites the obsstore file,
333 Every invocation of this function completely rewrites the obsstore file,
341 skipping the markers we want removed. A new temporary file is
334 skipping the markers we want removed. A new temporary file is
342 created, the remaining markers are written there, and on .close() the file
335 created, the remaining markers are written there, and on .close() the file
343 gets atomically renamed to obsstore, thus guaranteeing consistency."""
336 gets atomically renamed to obsstore, thus guaranteeing consistency."""
344 if not indices:
337 if not indices:
345 # we don't want to rewrite the obsstore with the same content
338 # we don't want to rewrite the obsstore with the same content
346 return
339 return
347
340
348 left = []
341 left = []
349 current = obsstore._all
342 current = obsstore._all
350 n = 0
343 n = 0
351 for i, m in enumerate(current):
344 for i, m in enumerate(current):
352 if i in indices:
345 if i in indices:
353 n += 1
346 n += 1
354 continue
347 continue
355 left.append(m)
348 left.append(m)
356
349
357 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
350 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
358 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
351 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
359 newobsstorefile.write(bytes)
352 newobsstorefile.write(bytes)
360 newobsstorefile.close()
353 newobsstorefile.close()
361 return n
354 return n
362
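A hedged sketch of the index-based contract described above: drop the n newest markers from a repository's obsstore. All names other than `deleteobsmarkers` are illustrative.

from mercurial import repair

def dropnewestmarkers(repo, n=1):
    store = repo.obsstore
    total = len(store._all)
    # indices of the n most recently appended markers
    doomed = set(range(max(0, total - n), total))
    return repair.deleteobsmarkers(store, doomed)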
363 def upgraderequiredsourcerequirements(repo):
364 """Obtain requirements required to be present to upgrade a repo.
365
366 An upgrade will not be allowed if the repository doesn't have the
367 requirements returned by this function.
368 """
369 return set([
370 # Introduced in Mercurial 0.9.2.
371 'revlogv1',
372 # Introduced in Mercurial 0.9.2.
373 'store',
374 ])
375
376 def upgradeblocksourcerequirements(repo):
377 """Obtain requirements that will prevent an upgrade from occurring.
378
379 An upgrade cannot be performed if the source repository contains a
380 requirement in the returned set.
381 """
382 return set([
383 # The upgrade code does not yet support these experimental features.
384 # This is an artificial limitation.
385 'manifestv2',
386 'treemanifest',
387 # This was a precursor to generaldelta and was never enabled by default.
388 # It should (hopefully) not exist in the wild.
389 'parentdelta',
390 # Upgrade should operate on the actual store, not the shared link.
391 'shared',
392 ])
393
394 def upgradesupportremovedrequirements(repo):
395 """Obtain requirements that can be removed during an upgrade.
396
397 If an upgrade were to create a repository that dropped a requirement,
398 the dropped requirement must appear in the returned set for the upgrade
399 to be allowed.
400 """
401 return set()
402
403 def upgradesupporteddestrequirements(repo):
404 """Obtain requirements that upgrade supports in the destination.
405
406 If the result of the upgrade would create requirements not in this set,
407 the upgrade is disallowed.
408
409 Extensions should monkeypatch this to add their custom requirements.
410 """
411 return set([
412 'dotencode',
413 'fncache',
414 'generaldelta',
415 'revlogv1',
416 'store',
417 ])
418
419 def upgradeallowednewrequirements(repo):
420 """Obtain requirements that can be added to a repository during upgrade.
421
422 This is used to disallow proposed requirements from being added when
423 they weren't present before.
424
425 We use a list of allowed requirement additions instead of a list of known
426 bad additions because the whitelist approach is safer and will prevent
427 future, unknown requirements from accidentally being added.
428 """
429 return set([
430 'dotencode',
431 'fncache',
432 'generaldelta',
433 ])
434
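The docstring of upgradesupporteddestrequirements explicitly invites extensions to monkeypatch it. A hedged sketch via `extensions.wrapfunction` follows; 'myrequirement' is a made-up requirement string, and note that after this commit the wrapped function lives in the new `upgrade` module rather than here in `repair`.

from mercurial import extensions, repair

def _supported(orig, repo):
    # extend the whitelist with a custom requirement
    reqs = orig(repo)
    reqs.add('myrequirement')
    return reqs

def uisetup(ui):
    extensions.wrapfunction(repair, 'upgradesupporteddestrequirements',
                            _supported)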
435 deficiency = 'deficiency'
436 optimisation = 'optimization'
437
438 class upgradeimprovement(object):
439 """Represents an improvement that can be made as part of an upgrade.
440
441 The following attributes are defined on each instance:
442
443 name
444 Machine-readable string uniquely identifying this improvement. It
445 will be mapped to an action later in the upgrade process.
446
447 type
448 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
449 problem. An optimization is an action (sometimes optional) that
450 can be taken to further improve the state of the repository.
451
452 description
453 Message intended for humans explaining the improvement in more detail,
454 including its implications. For ``deficiency`` types, should be
455 worded in the present tense. For ``optimisation`` types, should be
456 worded in the future tense.
457
458 upgrademessage
459 Message intended for humans explaining what an upgrade addressing this
460 issue will do. Should be worded in the future tense.
461
462 fromdefault (``deficiency`` types only)
463 Boolean indicating whether the current (deficient) state deviates
464 from Mercurial's default configuration.
465
466 fromconfig (``deficiency`` types only)
467 Boolean indicating whether the current (deficient) state deviates
468 from the current Mercurial configuration.
469 """
470 def __init__(self, name, type, description, upgrademessage, **kwargs):
471 self.name = name
472 self.type = type
473 self.description = description
474 self.upgrademessage = upgrademessage
475
476 for k, v in kwargs.items():
477 setattr(self, k, v)
478
479 def upgradefindimprovements(repo):
480 """Determine improvements that can be made to the repo during upgrade.
481
482 Returns a list of ``upgradeimprovement`` describing repository deficiencies
483 and optimizations.
484 """
485 # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
486 from . import localrepo
487
488 newreporeqs = localrepo.newreporequirements(repo)
489
490 improvements = []
491
492 # We could detect lack of revlogv1 and store here, but they were added
493 # in 0.9.2 and we don't support upgrading repos without these
494 # requirements, so let's not bother.
495
496 if 'fncache' not in repo.requirements:
497 improvements.append(upgradeimprovement(
498 name='fncache',
499 type=deficiency,
500 description=_('long and reserved filenames may not work correctly; '
501 'repository performance is sub-optimal'),
502 upgrademessage=_('repository will be more resilient to storing '
503 'certain paths and performance of certain '
504 'operations should be improved'),
505 fromdefault=True,
506 fromconfig='fncache' in newreporeqs))
507
508 if 'dotencode' not in repo.requirements:
509 improvements.append(upgradeimprovement(
510 name='dotencode',
511 type=deficiency,
512 description=_('storage of filenames beginning with a period or '
513 'space may not work correctly'),
514 upgrademessage=_('repository will be better able to store files '
515 'beginning with a space or period'),
516 fromdefault=True,
517 fromconfig='dotencode' in newreporeqs))
518
519 if 'generaldelta' not in repo.requirements:
520 improvements.append(upgradeimprovement(
521 name='generaldelta',
522 type=deficiency,
523 description=_('deltas within internal storage are unable to '
524 'choose optimal revisions; repository is larger and '
525 'slower than it could be; interaction with other '
526 'repositories may require extra network and CPU '
527 'resources, making "hg push" and "hg pull" slower'),
528 upgrademessage=_('repository storage will be able to create '
529 'optimal deltas; new repository data will be '
530 'smaller and read times should decrease; '
531 'interacting with other repositories using this '
532 'storage model should require less network and '
533 'CPU resources, making "hg push" and "hg pull" '
534 'faster'),
535 fromdefault=True,
536 fromconfig='generaldelta' in newreporeqs))
537
538 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
539 # changelogs with deltas.
540 cl = repo.changelog
541 for rev in cl:
542 chainbase = cl.chainbase(rev)
543 if chainbase != rev:
544 improvements.append(upgradeimprovement(
545 name='removecldeltachain',
546 type=deficiency,
547 description=_('changelog storage is using deltas instead of '
548 'raw entries; changelog reading and any '
549 'operation relying on changelog data are slower '
550 'than they could be'),
551 upgrademessage=_('changelog storage will be reformatted to '
552 'store raw entries; changelog reading will be '
553 'faster; changelog size may be reduced'),
554 fromdefault=True,
555 fromconfig=True))
556 break
557
558 # Now for the optimizations.
559
560 # These are unconditionally added. There is logic later that figures out
561 # which ones to apply.
562
563 improvements.append(upgradeimprovement(
564 name='redeltaparent',
565 type=optimisation,
566 description=_('deltas within internal storage will be recalculated to '
567 'choose an optimal base revision where this was not '
568 'already done; the size of the repository may shrink and '
569 'various operations may become faster; the first time '
570 'this optimization is performed could slow down upgrade '
571 'execution considerably; subsequent invocations should '
572 'not run noticeably slower'),
573 upgrademessage=_('deltas within internal storage will choose a new '
574 'base revision if needed')))
575
576 improvements.append(upgradeimprovement(
577 name='redeltamultibase',
578 type=optimisation,
579 description=_('deltas within internal storage will be recalculated '
580 'against multiple base revisions and the smallest '
581 'difference will be used; the size of the repository may '
582 'shrink significantly when there are many merges; this '
583 'optimization will slow down execution in proportion to '
584 'the number of merges in the repository and the number '
585 'of files in the repository; this slowdown should not '
586 'be significant unless there are tens of thousands of '
587 'files and thousands of merges'),
588 upgrademessage=_('deltas within internal storage will choose an '
589 'optimal delta by computing deltas against multiple '
590 'parents; may slow down execution time '
591 'significantly')))
592
593 improvements.append(upgradeimprovement(
594 name='redeltaall',
595 type=optimisation,
596 description=_('deltas within internal storage will always be '
597 'recalculated without reusing prior deltas; this will '
598 'likely make execution run several times slower; this '
599 'optimization is typically not needed'),
600 upgrademessage=_('deltas within internal storage will be fully '
601 'recomputed; this will likely drastically slow down '
602 'execution time')))
603
604 return improvements
605
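As a rough sketch of how a caller might consume this list (assuming this module is importable as mercurial.upgrade and that repo is an existing local repository object):

from mercurial import upgrade

def summarize(repo):
    # upgradefindimprovements() returns upgradeimprovement instances
    # carrying .name, .type, .description and .upgrademessage.
    improvements = upgrade.upgradefindimprovements(repo)
    deficiencies = [i for i in improvements if i.type == upgrade.deficiency]
    optimisations = [i for i in improvements if i.type == upgrade.optimisation]
    return deficiencies, optimisations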
606 def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
607 optimize):
608 """Determine upgrade actions that will be performed.
609
610 Given a list of improvements as returned by ``upgradefindimprovements``,
611 determine the list of upgrade actions that will be performed.
612
613 The role of this function is to filter improvements if needed, apply
614 recommended optimizations from the improvements list that make sense,
615 etc.
616
617 Returns a list of action names.
618 """
619 newactions = []
620
621 knownreqs = upgradesupporteddestrequirements(repo)
622
623 for i in improvements:
624 name = i.name
625
626 # If the action is a requirement that doesn't show up in the
627 # destination requirements, prune the action.
628 if name in knownreqs and name not in destreqs:
629 continue
630
631 if i.type == deficiency:
632 newactions.append(name)
633
634 newactions.extend(o for o in sorted(optimize) if o not in newactions)
635
636 # FUTURE consider adding some optimizations here for certain transitions.
637 # e.g. adding generaldelta could schedule parent redeltas.
638
639 return newactions
640
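The pruning rule above can be illustrated with a self-contained toy (the improvement names and requirement sets below are hypothetical stand-ins, not real repository state):

class fakeimprovement(object):
    def __init__(self, name, type):
        self.name = name
        self.type = type

improvements = [fakeimprovement('dotencode', 'deficiency'),
                fakeimprovement('removecldeltachain', 'deficiency')]
# 'dotencode' is a known destination requirement; since the destination
# requirements below do not include it, its action is pruned.
knownreqs = set(['dotencode', 'fncache', 'generaldelta', 'revlogv1', 'store'])
destreqs = set(['fncache', 'generaldelta', 'revlogv1', 'store'])

actions = []
for i in improvements:
    if i.name in knownreqs and i.name not in destreqs:
        continue
    if i.type == 'deficiency':
        actions.append(i.name)

assert actions == ['removecldeltachain']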
641 def _revlogfrompath(repo, path):
642 """Obtain a revlog from a repo path.
643
644 An instance of the appropriate class is returned.
645 """
646 if path == '00changelog.i':
647 return changelog.changelog(repo.svfs)
648 elif path.endswith('00manifest.i'):
649 mandir = path[:-len('00manifest.i')]
650 return manifest.manifestrevlog(repo.svfs, dir=mandir)
651 else:
652 # Filelogs don't do anything special with settings. So we can use a
653 # vanilla revlog.
654 return revlog.revlog(repo.svfs, path)
655
656 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
657 """Copy revlogs between 2 repos."""
658 revcount = 0
659 srcsize = 0
660 srcrawsize = 0
661 dstsize = 0
662 fcount = 0
663 frevcount = 0
664 fsrcsize = 0
665 frawsize = 0
666 fdstsize = 0
667 mcount = 0
668 mrevcount = 0
669 msrcsize = 0
670 mrawsize = 0
671 mdstsize = 0
672 crevcount = 0
673 csrcsize = 0
674 crawsize = 0
675 cdstsize = 0
676
677 # Perform a pass to collect metadata. This validates we can open all
678 # source files and allows a unified progress bar to be displayed.
679 for unencoded, encoded, size in srcrepo.store.walk():
680 if unencoded.endswith('.d'):
681 continue
682
683 rl = _revlogfrompath(srcrepo, unencoded)
684 revcount += len(rl)
685
686 datasize = 0
687 rawsize = 0
688 idx = rl.index
689 for rev in rl:
690 e = idx[rev]
691 datasize += e[1]
692 rawsize += e[2]
693
694 srcsize += datasize
695 srcrawsize += rawsize
696
697 # This is for the separate progress bars.
698 if isinstance(rl, changelog.changelog):
699 crevcount += len(rl)
700 csrcsize += datasize
701 crawsize += rawsize
702 elif isinstance(rl, manifest.manifestrevlog):
703 mcount += 1
704 mrevcount += len(rl)
705 msrcsize += datasize
706 mrawsize += rawsize
707 elif isinstance(rl, revlog.revlog):
708 fcount += 1
709 frevcount += len(rl)
710 fsrcsize += datasize
711 frawsize += rawsize
712
713 if not revcount:
714 return
715
716 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
717 '%d in changelog)\n') %
718 (revcount, frevcount, mrevcount, crevcount))
719 ui.write(_('migrating %s in store; %s tracked data\n') %
720 (util.bytecount(srcsize), util.bytecount(srcrawsize)))
721
722 # Used to keep track of progress.
723 progress = []
724 def oncopiedrevision(rl, rev, node):
725 progress[1] += 1
726 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
727
728 # Do the actual copying.
729 # FUTURE this operation can be farmed off to worker processes.
730 seen = set()
731 for unencoded, encoded, size in srcrepo.store.walk():
732 if unencoded.endswith('.d'):
733 continue
734
735 oldrl = _revlogfrompath(srcrepo, unencoded)
736 newrl = _revlogfrompath(dstrepo, unencoded)
737
738 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
739 ui.write(_('finished migrating %d manifest revisions across %d '
740 'manifests; change in size: %s\n') %
741 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
742
743 ui.write(_('migrating changelog containing %d revisions '
744 '(%s in store; %s tracked data)\n') %
745 (crevcount, util.bytecount(csrcsize),
746 util.bytecount(crawsize)))
747 seen.add('c')
748 progress[:] = [_('changelog revisions'), 0, crevcount]
749 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
750 ui.write(_('finished migrating %d filelog revisions across %d '
751 'filelogs; change in size: %s\n') %
752 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
753
754 ui.write(_('migrating %d manifests containing %d revisions '
755 '(%s in store; %s tracked data)\n') %
756 (mcount, mrevcount, util.bytecount(msrcsize),
757 util.bytecount(mrawsize)))
758 seen.add('m')
759 progress[:] = [_('manifest revisions'), 0, mrevcount]
760 elif 'f' not in seen:
761 ui.write(_('migrating %d filelogs containing %d revisions '
762 '(%s in store; %s tracked data)\n') %
763 (fcount, frevcount, util.bytecount(fsrcsize),
764 util.bytecount(frawsize)))
765 seen.add('f')
766 progress[:] = [_('file revisions'), 0, frevcount]
767
768 ui.progress(progress[0], progress[1], total=progress[2])
769
770 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
771 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
772 deltareuse=deltareuse,
773 aggressivemergedeltas=aggressivemergedeltas)
774
775 datasize = 0
776 idx = newrl.index
777 for rev in newrl:
778 datasize += idx[rev][1]
779
780 dstsize += datasize
781
782 if isinstance(newrl, changelog.changelog):
783 cdstsize += datasize
784 elif isinstance(newrl, manifest.manifestrevlog):
785 mdstsize += datasize
786 else:
787 fdstsize += datasize
788
789 ui.progress(progress[0], None)
790
791 ui.write(_('finished migrating %d changelog revisions; change in size: '
792 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
793
794 ui.write(_('finished migrating %d total revisions; total change in store '
795 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
796
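The function is built around a two-pass pattern: walk every revlog once to count the work and validate that the files open, then walk again to copy. A minimal standalone sketch of that pattern (generic names, no Mercurial internals):

def copyall(revlogs, copyrev, progress):
    # Pass 1: count revisions up front so failures surface early and a
    # single unified progress bar can cover all of the work.
    total = sum(len(rl) for rl in revlogs)
    # Pass 2: do the copying, ticking the progress callback per revision.
    done = 0
    for rl in revlogs:
        for rev in rl:
            copyrev(rl, rev)
            done += 1
            progress(done, total)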
797 def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
798 """Determine whether to copy a store file during upgrade.
799
800 This function is called when migrating store files from ``srcrepo`` to
801 ``dstrepo`` as part of upgrading a repository.
802
803 Args:
804 srcrepo: repo we are copying from
805 dstrepo: repo we are copying to
806 requirements: set of requirements for ``dstrepo``
807 path: store file being examined
808 mode: the ``ST_MODE`` file type of ``path``
809 st: ``stat`` data structure for ``path``
810
811 Function should return ``True`` if the file is to be copied.
812 """
813 # Skip revlogs.
814 if path.endswith(('.i', '.d')):
815 return False
816 # Skip transaction related files.
817 if path.startswith('undo'):
818 return False
819 # Only copy regular files.
820 if mode != stat.S_IFREG:
821 return False
822 # Skip other files that should not be copied.
823 if path in ('lock', 'fncache'):
824 return False
825
826 return True
827
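For illustration, here is how this filter classifies a few typical store paths (a sketch; None stands in for the repo, requirements and stat arguments, which the current logic does not consult):

import stat

f = _upgradefilterstorefile
assert not f(None, None, None, 'data/foo.i', stat.S_IFREG, None)  # revlog
assert not f(None, None, None, 'undo.backupfiles', stat.S_IFREG, None)  # transaction file
assert not f(None, None, None, 'fncache', stat.S_IFREG, None)  # explicitly skipped
assert f(None, None, None, 'phaseroots', stat.S_IFREG, None)  # regular file, copied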
828 def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
829 """Hook point for extensions to perform additional actions during upgrade.
830
831 This function is called after revlogs and store files have been copied but
832 before the new store is swapped into the original location.
833 """
834
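An extension could participate in this hook point by wrapping the function, for example (a sketch; the extra step shown is hypothetical and assumes the module is importable as mercurial.upgrade):

from mercurial import extensions

def finishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    orig(ui, srcrepo, dstrepo, requirements)
    # Hypothetical extra step performed by the extension.
    ui.write('migrating extension-specific data\n')

def extsetup(ui):
    from mercurial import upgrade
    extensions.wrapfunction(upgrade, '_upgradefinishdatamigration',
                            finishdatamigration)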
835 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
836 """Do the low-level work of upgrading a repository.
837
838 The upgrade is effectively performed as a copy between a source
839 repository and a temporary destination repository.
840
841 The source repository is unmodified for as long as possible so the
842 upgrade can abort at any time without causing loss of service for
843 readers and without corrupting the source repository.
844 """
845 assert srcrepo.currentwlock()
846 assert dstrepo.currentwlock()
847
848 ui.write(_('(it is safe to interrupt this process any time before '
849 'data migration completes)\n'))
850
851 if 'redeltaall' in actions:
852 deltareuse = revlog.revlog.DELTAREUSENEVER
853 elif 'redeltaparent' in actions:
854 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
855 elif 'redeltamultibase' in actions:
856 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
857 else:
858 deltareuse = revlog.revlog.DELTAREUSEALWAYS
859
860 with dstrepo.transaction('upgrade') as tr:
861 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
862 'redeltamultibase' in actions)
863
864 # Now copy other files in the store directory.
865 for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
866 if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
867 p, kind, st):
868 continue
869
870 srcrepo.ui.write(_('copying %s\n') % p)
871 src = srcrepo.store.vfs.join(p)
872 dst = dstrepo.store.vfs.join(p)
873 util.copyfile(src, dst, copystat=True)
874
875 _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)
876
877 ui.write(_('data fully migrated to temporary repository\n'))
878
879 backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
880 backupvfs = vfsmod.vfs(backuppath)
881
882 # Make a backup of requires file first, as it is the first to be modified.
883 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
884
885 # We install an arbitrary requirement that clients must not support
886 # as a mechanism to lock out new clients during the data swap. This is
887 # better than allowing a client to continue while the repository is in
888 # an inconsistent state.
889 ui.write(_('marking source repository as being upgraded; clients will be '
890 'unable to read from repository\n'))
891 scmutil.writerequires(srcrepo.vfs,
892 srcrepo.requirements | set(['upgradeinprogress']))
893
894 ui.write(_('starting in-place swap of repository data\n'))
895 ui.write(_('replaced files will be backed up at %s\n') %
896 backuppath)
897
898 # Now swap in the new store directory. Doing it as a rename should make
899 # the operation nearly instantaneous and atomic (at least in well-behaved
900 # environments).
901 ui.write(_('replacing store...\n'))
902 tstart = util.timer()
903 util.rename(srcrepo.spath, backupvfs.join('store'))
904 util.rename(dstrepo.spath, srcrepo.spath)
905 elapsed = util.timer() - tstart
906 ui.write(_('store replacement complete; repository was inconsistent for '
907 '%0.1fs\n') % elapsed)
908
909 # We first write the requirements file. Any new requirements will lock
910 # out legacy clients.
911 ui.write(_('finalizing requirements file and making repository readable '
912 'again\n'))
913 scmutil.writerequires(srcrepo.vfs, requirements)
914
915 # The lock file from the old store won't be removed because nothing has a
916 # reference to its new location. So clean it up manually. Alternatively, we
917 # could update srcrepo.svfs and other variables to point to the new
918 # location. This is simpler.
919 backupvfs.unlink('store/lock')
920
921 return backuppath
922
923 def upgraderepo(ui, repo, run=False, optimize=None):
924 """Upgrade a repository in place."""
925 # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
926 from . import localrepo
927
928 optimize = set(optimize or [])
929 repo = repo.unfiltered()
930
931 # Ensure the repository can be upgraded.
932 missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
933 if missingreqs:
934 raise error.Abort(_('cannot upgrade repository; requirement '
935 'missing: %s') % _(', ').join(sorted(missingreqs)))
936
937 blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
938 if blockedreqs:
939 raise error.Abort(_('cannot upgrade repository; unsupported source '
940 'requirement: %s') %
941 _(', ').join(sorted(blockedreqs)))
942
943 # FUTURE there is potentially a need to control the wanted requirements via
944 # command arguments or via an extension hook point.
945 newreqs = localrepo.newreporequirements(repo)
946
947 noremovereqs = (repo.requirements - newreqs -
948 upgradesupportremovedrequirements(repo))
949 if noremovereqs:
950 raise error.Abort(_('cannot upgrade repository; requirement would be '
951 'removed: %s') % _(', ').join(sorted(noremovereqs)))
952
953 noaddreqs = (newreqs - repo.requirements -
954 upgradeallowednewrequirements(repo))
955 if noaddreqs:
956 raise error.Abort(_('cannot upgrade repository; do not support adding '
957 'requirement: %s') %
958 _(', ').join(sorted(noaddreqs)))
959
960 unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
961 if unsupportedreqs:
962 raise error.Abort(_('cannot upgrade repository; do not support '
963 'destination requirement: %s') %
964 _(', ').join(sorted(unsupportedreqs)))
965
966 # Find and validate all improvements that can be made.
967 improvements = upgradefindimprovements(repo)
968 for i in improvements:
969 if i.type not in (deficiency, optimisation):
970 raise error.Abort(_('unexpected improvement type %s for %s') % (
971 i.type, i.name))
972
973 # Validate arguments.
974 unknownoptimize = optimize - set(i.name for i in improvements
975 if i.type == optimisation)
976 if unknownoptimize:
977 raise error.Abort(_('unknown optimization action requested: %s') %
978 ', '.join(sorted(unknownoptimize)),
979 hint=_('run without arguments to see valid '
980 'optimizations'))
981
982 actions = upgradedetermineactions(repo, improvements, repo.requirements,
983 newreqs, optimize)
984
985 def printrequirements():
986 ui.write(_('requirements\n'))
987 ui.write(_(' preserved: %s\n') %
988 _(', ').join(sorted(newreqs & repo.requirements)))
989
990 if repo.requirements - newreqs:
991 ui.write(_(' removed: %s\n') %
992 _(', ').join(sorted(repo.requirements - newreqs)))
993
994 if newreqs - repo.requirements:
995 ui.write(_(' added: %s\n') %
996 _(', ').join(sorted(newreqs - repo.requirements)))
997
998 ui.write('\n')
999
1000 def printupgradeactions():
1001 for action in actions:
1002 for i in improvements:
1003 if i.name == action:
1004 ui.write('%s\n %s\n\n' %
1005 (i.name, i.upgrademessage))
1006
1007 if not run:
1008 fromdefault = []
1009 fromconfig = []
1010 optimizations = []
1011
1012 for i in improvements:
1013 assert i.type in (deficiency, optimisation)
1014 if i.type == deficiency:
1015 if i.fromdefault:
1016 fromdefault.append(i)
1017 if i.fromconfig:
1018 fromconfig.append(i)
1019 else:
1020 optimizations.append(i)
1021
1022 if fromdefault or fromconfig:
1023 fromconfignames = set(x.name for x in fromconfig)
1024 onlydefault = [i for i in fromdefault
1025 if i.name not in fromconfignames]
1026
1027 if fromconfig:
1028 ui.write(_('repository lacks features recommended by '
1029 'current config options:\n\n'))
1030 for i in fromconfig:
1031 ui.write('%s\n %s\n\n' % (i.name, i.description))
1032
1033 if onlydefault:
1034 ui.write(_('repository lacks features used by the default '
1035 'config options:\n\n'))
1036 for i in onlydefault:
1037 ui.write('%s\n %s\n\n' % (i.name, i.description))
1038
1039 ui.write('\n')
1040 else:
1041 ui.write(_('(no feature deficiencies found in existing '
1042 'repository)\n'))
1043
1044 ui.write(_('performing an upgrade with "--run" will make the following '
1045 'changes:\n\n'))
1046
1047 printrequirements()
1048 printupgradeactions()
1049
1050 unusedoptimize = [i for i in improvements
1051 if i.name not in actions and i.type == optimisation]
1052 if unusedoptimize:
1053 ui.write(_('additional optimizations are available by specifying '
1054 '"--optimize <name>":\n\n'))
1055 for i in unusedoptimize:
1056 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
1057 return
1058
1059 # Else we're in the run=true case.
1060 ui.write(_('upgrade will perform the following actions:\n\n'))
1061 printrequirements()
1062 printupgradeactions()
1063
1064 ui.write(_('beginning upgrade...\n'))
1065 with repo.wlock():
1066 with repo.lock():
1067 ui.write(_('repository locked and read-only\n'))
1068 # Our strategy for upgrading the repository is to create a new,
1069 # temporary repository, write data to it, then do a swap of the
1070 # data. There are lighter-weight ways to do this, but it is easier
1071 # to create a new repo object than to instantiate all the components
1072 # (like the store) separately.
1073 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
1074 backuppath = None
1075 try:
1076 ui.write(_('creating temporary repository to stage migrated '
1077 'data: %s\n') % tmppath)
1078 dstrepo = localrepo.localrepository(repo.baseui,
1079 path=tmppath,
1080 create=True)
1081
1082 with dstrepo.wlock():
1083 with dstrepo.lock():
1084 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
1085 actions)
1086
1087 finally:
1088 ui.write(_('removing temporary repository %s\n') % tmppath)
1089 repo.vfs.rmtree(tmppath, forcibly=True)
1090
1091 if backuppath:
1092 ui.warn(_('copy of old repository backed up at %s\n') %
1093 backuppath)
1094 ui.warn(_('the old repository will not be deleted; remove '
1095 'it to free up disk space once the upgraded '
1096 'repository is verified\n'))
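This function backs the debugupgraderepo debug command. A sketch of driving it directly, given the ui and repo objects available inside a command handler:

# Dry run: report deficiencies, planned actions and unused optimizations.
upgraderepo(ui, repo, run=False)
# Perform the upgrade, additionally requesting the redeltaparent
# optimization defined above.
upgraderepo(ui, repo, run=True, optimize=['redeltaparent'])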
@@ -1,1096 +1,758
1 # repair.py - functions for repository repair for mercurial
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
5 #
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
8
9 from __future__ import absolute_import
10
11 import errno
12 import hashlib
13 import stat
14 import tempfile
15
16 from .i18n import _
17 from .node import short
18 from . import (
19 bundle2,
20 changegroup,
21 changelog,
22 error,
23 exchange,
24 manifest,
25 obsolete,
26 revlog,
27 scmutil,
28 util,
29 vfs as vfsmod,
30 )
31
32 def _bundle(repo, bases, heads, node, suffix, compress=True):
33 """create a bundle with the specified revisions as a backup"""
34 cgversion = changegroup.safeversion(repo)
35
36 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
37 version=cgversion)
38 backupdir = "strip-backup"
39 vfs = repo.vfs
40 if not vfs.isdir(backupdir):
41 vfs.mkdir(backupdir)
42
43 # Include a hash of all the nodes in the filename for uniqueness
44 allcommits = repo.set('%ln::%ln', bases, heads)
45 allhashes = sorted(c.hex() for c in allcommits)
46 totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
47 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
48
49 comp = None
50 if cgversion != '01':
51 bundletype = "HG20"
52 if compress:
53 comp = 'BZ'
54 elif compress:
55 bundletype = "HG10BZ"
56 else:
57 bundletype = "HG10UN"
58 return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
59 compression=comp)
60
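The backup filename therefore encodes both the strip point and a digest of every stripped changeset. A standalone sketch with hypothetical values:

import hashlib

shortnode = '1e28bd9a21fa'  # stands in for short(node)
allhashes = sorted(['ab' * 20, 'cd' * 20])  # stands in for the changeset hexes
totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
name = "strip-backup/%s-%s-%s.hg" % (shortnode, totalhash[:8], 'backup')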
61 def _collectfiles(repo, striprev):
62 """find out the filelogs affected by the strip"""
63 files = set()
64
65 for x in xrange(striprev, len(repo)):
66 files.update(repo[x].files())
67
68 return sorted(files)
69
70 def _collectbrokencsets(repo, files, striprev):
71 """return the changesets which will be broken by the truncation"""
72 s = set()
73 def collectone(revlog):
74 _, brokenset = revlog.getstrippoint(striprev)
75 s.update([revlog.linkrev(r) for r in brokenset])
76
77 collectone(repo.manifestlog._revlog)
78 for fname in files:
79 collectone(repo.file(fname))
80
81 return s
82
83 def strip(ui, repo, nodelist, backup=True, topic='backup'):
84 # This function operates within a transaction of its own, but does
85 # not take any lock on the repo.
86 # Simple way to maintain backwards compatibility for this
87 # argument.
88 if backup in ['none', 'strip']:
89 backup = False
90
91 repo = repo.unfiltered()
92 repo.destroying()
93
94 cl = repo.changelog
95 # TODO handle undo of merge sets
96 if isinstance(nodelist, str):
97 nodelist = [nodelist]
98 striplist = [cl.rev(node) for node in nodelist]
99 striprev = min(striplist)
100
101 files = _collectfiles(repo, striprev)
102 saverevs = _collectbrokencsets(repo, files, striprev)
103
104 # Some revisions with rev > striprev may not be descendants of striprev.
105 # We have to find these revisions and put them in a bundle, so that
106 # we can restore them after the truncations.
107 # To create the bundle we use repo.changegroupsubset which requires
108 # the list of heads and bases of the set of interesting revisions.
109 # (head = revision in the set that has no descendant in the set;
110 # base = revision in the set that has no ancestor in the set)
111 tostrip = set(striplist)
112 saveheads = set(saverevs)
113 for r in cl.revs(start=striprev + 1):
114 if any(p in tostrip for p in cl.parentrevs(r)):
115 tostrip.add(r)
116
117 if r not in tostrip:
118 saverevs.add(r)
119 saveheads.difference_update(cl.parentrevs(r))
120 saveheads.add(r)
121 saveheads = [cl.node(r) for r in saveheads]
122
123 # compute base nodes
124 if saverevs:
125 descendants = set(cl.descendants(saverevs))
126 saverevs.difference_update(descendants)
127 savebases = [cl.node(r) for r in saverevs]
128 stripbases = [cl.node(r) for r in tostrip]
129
130 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
131 # is much faster
132 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
133 if newbmtarget:
134 newbmtarget = repo[newbmtarget.first()].node()
135 else:
136 newbmtarget = '.'
137
138 bm = repo._bookmarks
139 updatebm = []
140 for m in bm:
141 rev = repo[bm[m]].rev()
142 if rev in tostrip:
143 updatebm.append(m)
144
145 # create a changegroup for all the branches we need to keep
146 backupfile = None
147 vfs = repo.vfs
148 node = nodelist[-1]
149 if backup:
150 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
151 repo.ui.status(_("saved backup bundle to %s\n") %
152 vfs.join(backupfile))
153 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
154 vfs.join(backupfile))
155 tmpbundlefile = None
156 if saveheads:
157 # do not compress temporary bundle if we remove it from disk later
158 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
159 compress=False)
160
161 mfst = repo.manifestlog._revlog
162
163 curtr = repo.currenttransaction()
164 if curtr is not None:
165 del curtr # avoid carrying reference to transaction for nothing
166 raise error.ProgrammingError('cannot strip from inside a transaction')
167
168 try:
169 with repo.transaction("strip") as tr:
170 offset = len(tr.entries)
171
172 tr.startgroup()
173 cl.strip(striprev, tr)
174 mfst.strip(striprev, tr)
175 if 'treemanifest' in repo.requirements: # safe but unnecessary
176 # otherwise
177 for unencoded, encoded, size in repo.store.datafiles():
178 if (unencoded.startswith('meta/') and
179 unencoded.endswith('00manifest.i')):
180 dir = unencoded[5:-12]
181 repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
182 for fn in files:
183 repo.file(fn).strip(striprev, tr)
184 tr.endgroup()
185
186 for i in xrange(offset, len(tr.entries)):
187 file, troffset, ignore = tr.entries[i]
188 with repo.svfs(file, 'a', checkambig=True) as fp:
189 fp.truncate(troffset)
190 if troffset == 0:
191 repo.store.markremoved(file)
192
193 if tmpbundlefile:
194 ui.note(_("adding branch\n"))
195 f = vfs.open(tmpbundlefile, "rb")
196 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
197 if not repo.ui.verbose:
198 # silence internal shuffling chatter
199 repo.ui.pushbuffer()
200 if isinstance(gen, bundle2.unbundle20):
201 with repo.transaction('strip') as tr:
202 tr.hookargs = {'source': 'strip',
203 'url': 'bundle:' + vfs.join(tmpbundlefile)}
204 bundle2.applybundle(repo, gen, tr, source='strip',
205 url='bundle:' + vfs.join(tmpbundlefile))
206 else:
207 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
208 True)
209 if not repo.ui.verbose:
210 repo.ui.popbuffer()
211 f.close()
212 repo._phasecache.invalidate()
213
214 for m in updatebm:
215 bm[m] = repo[newbmtarget].node()
216
217 with repo.lock():
218 with repo.transaction('repair') as tr:
219 bm.recordchange(tr)
220
221 # remove undo files
222 for undovfs, undofile in repo.undofiles():
223 try:
224 undovfs.unlink(undofile)
225 except OSError as e:
226 if e.errno != errno.ENOENT:
227 ui.warn(_('error removing %s: %s\n') %
228 (undovfs.join(undofile), str(e)))
229
230 except: # re-raises
231 if backupfile:
232 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
233 % vfs.join(backupfile))
234 if tmpbundlefile:
235 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
236 % vfs.join(tmpbundlefile))
237 ui.warn(_("(fix the problem, then recover the changesets with "
238 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
239 raise
240 else:
241 if tmpbundlefile:
242 # Remove temporary bundle only if there were no exceptions
243 vfs.unlink(tmpbundlefile)
244
245 repo.destroyed()
246 # return the backup file path (or None if 'backup' was False) so
247 # extensions can use it
248 return backupfile
249
250 def rebuildfncache(ui, repo):
251 """Rebuilds the fncache file from repo history.
252
253 Missing entries will be added. Extra entries will be removed.
254 """
255 repo = repo.unfiltered()
256
257 if 'fncache' not in repo.requirements:
258 ui.warn(_('(not rebuilding fncache because repository does not '
259 'support fncache)\n'))
260 return
261
262 with repo.lock():
263 fnc = repo.store.fncache
264 # Trigger load of fncache.
265 if 'irrelevant' in fnc:
266 pass
267
268 oldentries = set(fnc.entries)
269 newentries = set()
270 seenfiles = set()
271
272 repolen = len(repo)
273 for rev in repo:
274 ui.progress(_('rebuilding'), rev, total=repolen,
275 unit=_('changesets'))
276
277 ctx = repo[rev]
278 for f in ctx.files():
279 # This is to minimize I/O.
280 if f in seenfiles:
281 continue
282 seenfiles.add(f)
283
284 i = 'data/%s.i' % f
285 d = 'data/%s.d' % f
286
287 if repo.store._exists(i):
288 newentries.add(i)
289 if repo.store._exists(d):
290 newentries.add(d)
291
292 ui.progress(_('rebuilding'), None)
293
294 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
295 for dir in util.dirs(seenfiles):
296 i = 'meta/%s/00manifest.i' % dir
297 d = 'meta/%s/00manifest.d' % dir
298
299 if repo.store._exists(i):
300 newentries.add(i)
301 if repo.store._exists(d):
302 newentries.add(d)
303
304 addcount = len(newentries - oldentries)
305 removecount = len(oldentries - newentries)
306 for p in sorted(oldentries - newentries):
307 ui.write(_('removing %s\n') % p)
308 for p in sorted(newentries - oldentries):
309 ui.write(_('adding %s\n') % p)
310
311 if addcount or removecount:
312 ui.write(_('%d items added, %d removed from fncache\n') %
313 (addcount, removecount))
314 fnc.entries = newentries
315 fnc._dirty = True
316
317 with repo.transaction('fncache') as tr:
318 fnc.write(tr)
319 else:
320 ui.write(_('fncache already up to date\n'))
321
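For reference, the entries being reconciled follow the store naming scheme for filelogs; a sketch of what a single tracked file contributes:

f = 'foo/bar.txt'
i = 'data/%s.i' % f  # revlog index; present whenever the file has history
d = 'data/%s.d' % f  # revlog data; present only for non-inline revlogs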
322 def stripbmrevset(repo, mark):
323 """
324 The revset to strip when strip is called with -B mark
325
326 Needs to live here so extensions can use it and wrap it even when strip is
327 not enabled or not present on a box.
328 """
329 return repo.revs("ancestors(bookmark(%s)) - "
330 "ancestors(head() and not bookmark(%s)) - "
331 "ancestors(bookmark() and not bookmark(%s))",
332 mark, mark, mark)
333
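A sketch of using this helper directly (assuming a repo object and a bookmark named 'feature'):

revs = stripbmrevset(repo, 'feature')
# revs now holds the changesets that only the bookmark makes reachable,
# i.e. what 'hg strip -B feature' would remove.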
334 def deleteobsmarkers(obsstore, indices):
335 """Delete some obsmarkers from obsstore and return how many were deleted
336
337 'indices' is a list of ints which are the indices
338 of the markers to be deleted.
339
340 Every invocation of this function completely rewrites the obsstore file,
341 skipping the markers we want to be removed. The new temporary file is
342 created, remaining markers are written there and on .close() this file
343 gets atomically renamed to obsstore, thus guaranteeing consistency."""
344 if not indices:
345 # we don't want to rewrite the obsstore with the same content
346 return
347
348 left = []
349 current = obsstore._all
350 n = 0
351 for i, m in enumerate(current):
352 if i in indices:
353 n += 1
354 continue
355 left.append(m)
356
357 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
358 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
359 newobsstorefile.write(bytes)
360 newobsstorefile.close()
361 return n
362
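A sketch of a caller (indices are positions within obsstore._all, as described in the docstring):

# Drop the first and third markers; returns how many were removed.
n = deleteobsmarkers(repo.obsstore, [0, 2])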
363 def upgraderequiredsourcerequirements(repo):
364 """Obtain requirements required to be present to upgrade a repo.
365
366 An upgrade will not be allowed if the repository doesn't have the
367 requirements returned by this function.
368 """
369 return set([
370 # Introduced in Mercurial 0.9.2.
371 'revlogv1',
372 # Introduced in Mercurial 0.9.2.
373 'store',
374 ])
375
376 def upgradeblocksourcerequirements(repo):
377 """Obtain requirements that will prevent an upgrade from occurring.
378
379 An upgrade cannot be performed if the source repository contains a
380 requirement in the returned set.
381 """
382 return set([
383 # The upgrade code does not yet support these experimental features.
384 # This is an artificial limitation.
385 'manifestv2',
386 'treemanifest',
387 # This was a precursor to generaldelta and was never enabled by default.
388 # It should (hopefully) not exist in the wild.
389 'parentdelta',
390 # Upgrade should operate on the actual store, not the shared link.
391 'shared',
392 ])
393
394 def upgradesupportremovedrequirements(repo):
395 """Obtain requirements that can be removed during an upgrade.
396
397 If an upgrade were to create a repository that dropped a requirement,
398 the dropped requirement must appear in the returned set for the upgrade
399 to be allowed.
400 """
401 return set()
402
403 def upgradesupporteddestrequirements(repo):
404 """Obtain requirements that upgrade supports in the destination.
405
406 If the result of the upgrade would create requirements not in this set,
407 the upgrade is disallowed.
408
409 Extensions should monkeypatch this to add their custom requirements.
410 """
411 return set([
412 'dotencode',
413 'fncache',
414 'generaldelta',
415 'revlogv1',
416 'store',
417 ])
418
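As the docstring above suggests, an extension can monkeypatch this function to declare support for its own requirement (the 'myextrequirement' name below is hypothetical):

from mercurial import upgrade

def extsetup(ui):
    origdest = upgrade.upgradesupporteddestrequirements
    def wrappeddest(repo):
        reqs = origdest(repo)
        reqs.add('myextrequirement')
        return reqs
    upgrade.upgradesupporteddestrequirements = wrappeddest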
419 def upgradeallowednewrequirements(repo):
420 """Obtain requirements that can be added to a repository during upgrade.
421
422 This is used to disallow proposed requirements from being added when
423 they weren't present before.
424
425 We use a list of allowed requirement additions instead of a list of known
426 bad additions because the whitelist approach is safer and will prevent
427 future, unknown requirements from accidentally being added.
428 """
429 return set([
430 'dotencode',
431 'fncache',
432 'generaldelta',
433 ])
434
435 deficiency = 'deficiency'
436 optimisation = 'optimization'
437
438 class upgradeimprovement(object):
439 """Represents an improvement that can be made as part of an upgrade.
440
441 The following attributes are defined on each instance:
442
443 name
444 Machine-readable string uniquely identifying this improvement. It
445 will be mapped to an action later in the upgrade process.
446
447 type
448 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
449 problem. An optimization is an action (sometimes optional) that
450 can be taken to further improve the state of the repository.
451
452 description
453 Message intended for humans explaining the improvement in more detail,
454 including the implications of it. For ``deficiency`` types, should be
455 worded in the present tense. For ``optimisation`` types, should be
456 worded in the future tense.
457
458 upgrademessage
459 Message intended for humans explaining what an upgrade addressing this
460 issue will do. Should be worded in the future tense.
461
462 fromdefault (``deficiency`` types only)
463 Boolean indicating whether the current (deficient) state deviates
464 from Mercurial's default configuration.
465
466 fromconfig (``deficiency`` types only)
467 Boolean indicating whether the current (deficient) state deviates
468 from the current Mercurial configuration.
469 """
470 def __init__(self, name, type, description, upgrademessage, **kwargs):
471 self.name = name
472 self.type = type
473 self.description = description
474 self.upgrademessage = upgrademessage
475
476 for k, v in kwargs.items():
477 setattr(self, k, v)
478
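A sketch of constructing one by hand (illustrative values; note that the extra keyword arguments become attributes via the **kwargs loop above):

i = upgradeimprovement(
    name='fncache',
    type=deficiency,
    description='long and reserved filenames may not work correctly',
    upgrademessage='repository will be more resilient to storing '
                   'certain paths',
    fromdefault=True,
    fromconfig=False)
assert i.fromdefault and not i.fromconfig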
479 def upgradefindimprovements(repo):
141 def upgradefindimprovements(repo):
480 """Determine improvements that can be made to the repo during upgrade.
142 """Determine improvements that can be made to the repo during upgrade.
481
143
482 Returns a list of ``upgradeimprovement`` describing repository deficiencies
144 Returns a list of ``upgradeimprovement`` describing repository deficiencies
483 and optimizations.
145 and optimizations.
484 """
146 """
485 # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
147 # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
486 from . import localrepo
148 from . import localrepo
487
149
488 newreporeqs = localrepo.newreporequirements(repo)
150 newreporeqs = localrepo.newreporequirements(repo)
489
151
490 improvements = []
152 improvements = []
491
153
492 # We could detect lack of revlogv1 and store here, but they were added
154 # We could detect lack of revlogv1 and store here, but they were added
493 # in 0.9.2 and we don't support upgrading repos without these
155 # in 0.9.2 and we don't support upgrading repos without these
494 # requirements, so let's not bother.
156 # requirements, so let's not bother.
495
157
496 if 'fncache' not in repo.requirements:
158 if 'fncache' not in repo.requirements:
497 improvements.append(upgradeimprovement(
159 improvements.append(upgradeimprovement(
498 name='fncache',
160 name='fncache',
499 type=deficiency,
161 type=deficiency,
500 description=_('long and reserved filenames may not work correctly; '
162 description=_('long and reserved filenames may not work correctly; '
501 'repository performance is sub-optimal'),
163 'repository performance is sub-optimal'),
502 upgrademessage=_('repository will be more resilient to storing '
164 upgrademessage=_('repository will be more resilient to storing '
503 'certain paths and performance of certain '
165 'certain paths and performance of certain '
504 'operations should be improved'),
166 'operations should be improved'),
505 fromdefault=True,
167 fromdefault=True,
506 fromconfig='fncache' in newreporeqs))
168 fromconfig='fncache' in newreporeqs))
507
169
508 if 'dotencode' not in repo.requirements:
170 if 'dotencode' not in repo.requirements:
509 improvements.append(upgradeimprovement(
171 improvements.append(upgradeimprovement(
510 name='dotencode',
172 name='dotencode',
511 type=deficiency,
173 type=deficiency,
512 description=_('storage of filenames beginning with a period or '
174 description=_('storage of filenames beginning with a period or '
513 'space may not work correctly'),
175 'space may not work correctly'),
514 upgrademessage=_('repository will be better able to store files '
176 upgrademessage=_('repository will be better able to store files '
515 'beginning with a space or period'),
177 'beginning with a space or period'),
516 fromdefault=True,
178 fromdefault=True,
517 fromconfig='dotencode' in newreporeqs))
179 fromconfig='dotencode' in newreporeqs))
518
180
519 if 'generaldelta' not in repo.requirements:
181 if 'generaldelta' not in repo.requirements:
520 improvements.append(upgradeimprovement(
182 improvements.append(upgradeimprovement(
521 name='generaldelta',
183 name='generaldelta',
522 type=deficiency,
184 type=deficiency,
523 description=_('deltas within internal storage are unable to '
185 description=_('deltas within internal storage are unable to '
524 'choose optimal revisions; repository is larger and '
186 'choose optimal revisions; repository is larger and '
525 'slower than it could be; interaction with other '
187 'slower than it could be; interaction with other '
526 'repositories may require extra network and CPU '
188 'repositories may require extra network and CPU '
527 'resources, making "hg push" and "hg pull" slower'),
189 'resources, making "hg push" and "hg pull" slower'),
528 upgrademessage=_('repository storage will be able to create '
190 upgrademessage=_('repository storage will be able to create '
529 'optimal deltas; new repository data will be '
191 'optimal deltas; new repository data will be '
530 'smaller and read times should decrease; '
192 'smaller and read times should decrease; '
531 'interacting with other repositories using this '
193 'interacting with other repositories using this '
532 'storage model should require less network and '
194 'storage model should require less network and '
533 'CPU resources, making "hg push" and "hg pull" '
195 'CPU resources, making "hg push" and "hg pull" '
534 'faster'),
196 'faster'),
535 fromdefault=True,
197 fromdefault=True,
536 fromconfig='generaldelta' in newreporeqs))
198 fromconfig='generaldelta' in newreporeqs))
537
199
538 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
200 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
539 # changelogs with deltas.
201 # changelogs with deltas.
540 cl = repo.changelog
202 cl = repo.changelog
541 for rev in cl:
203 for rev in cl:
542 chainbase = cl.chainbase(rev)
204 chainbase = cl.chainbase(rev)
543 if chainbase != rev:
205 if chainbase != rev:
544 improvements.append(upgradeimprovement(
206 improvements.append(upgradeimprovement(
545 name='removecldeltachain',
207 name='removecldeltachain',
546 type=deficiency,
208 type=deficiency,
547 description=_('changelog storage is using deltas instead of '
209 description=_('changelog storage is using deltas instead of '
548 'raw entries; changelog reading and any '
210 'raw entries; changelog reading and any '
549 'operation relying on changelog data are slower '
211 'operation relying on changelog data are slower '
550 'than they could be'),
212 'than they could be'),
551 upgrademessage=_('changelog storage will be reformated to '
213 upgrademessage=_('changelog storage will be reformated to '
552 'store raw entries; changelog reading will be '
214 'store raw entries; changelog reading will be '
553 'faster; changelog size may be reduced'),
215 'faster; changelog size may be reduced'),
554 fromdefault=True,
216 fromdefault=True,
555 fromconfig=True))
217 fromconfig=True))
556 break
218 break
557
219
558 # Now for the optimizations.
220 # Now for the optimizations.
559
221
560 # These are unconditionally added. There is logic later that figures out
222 # These are unconditionally added. There is logic later that figures out
561 # which ones to apply.
223 # which ones to apply.
562
224
563 improvements.append(upgradeimprovement(
225 improvements.append(upgradeimprovement(
564 name='redeltaparent',
226 name='redeltaparent',
565 type=optimisation,
227 type=optimisation,
566 description=_('deltas within internal storage will be recalculated to '
228 description=_('deltas within internal storage will be recalculated to '
567 'choose an optimal base revision where this was not '
229 'choose an optimal base revision where this was not '
568 'already done; the size of the repository may shrink and '
230 'already done; the size of the repository may shrink and '
569 'various operations may become faster; the first time '
231 'various operations may become faster; the first time '
570 'this optimization is performed could slow down upgrade '
232 'this optimization is performed could slow down upgrade '
571 'execution considerably; subsequent invocations should '
233 'execution considerably; subsequent invocations should '
572 'not run noticeably slower'),
234 'not run noticeably slower'),
573 upgrademessage=_('deltas within internal storage will choose a new '
235 upgrademessage=_('deltas within internal storage will choose a new '
574 'base revision if needed')))
236 'base revision if needed')))
575
237
576 improvements.append(upgradeimprovement(
238 improvements.append(upgradeimprovement(
577 name='redeltamultibase',
239 name='redeltamultibase',
578 type=optimisation,
240 type=optimisation,
579 description=_('deltas within internal storage will be recalculated '
241 description=_('deltas within internal storage will be recalculated '
580 'against multiple base revision and the smallest '
242 'against multiple base revision and the smallest '
581 'difference will be used; the size of the repository may '
243 'difference will be used; the size of the repository may '
582 'shrink significantly when there are many merges; this '
244 'shrink significantly when there are many merges; this '
583 'optimization will slow down execution in proportion to '
245 'optimization will slow down execution in proportion to '
584 'the number of merges in the repository and the amount '
246 'the number of merges in the repository and the amount '
585 'of files in the repository; this slow down should not '
247 'of files in the repository; this slow down should not '
586 'be significant unless there are tens of thousands of '
248 'be significant unless there are tens of thousands of '
587 'files and thousands of merges'),
249 'files and thousands of merges'),
588 upgrademessage=_('deltas within internal storage will choose an '
250 upgrademessage=_('deltas within internal storage will choose an '
589 'optimal delta by computing deltas against multiple '
251 'optimal delta by computing deltas against multiple '
590 'parents; may slow down execution time '
252 'parents; may slow down execution time '
591 'significantly')))
253 'significantly')))
592
254
593 improvements.append(upgradeimprovement(
255 improvements.append(upgradeimprovement(
594 name='redeltaall',
256 name='redeltaall',
595 type=optimisation,
257 type=optimisation,
596 description=_('deltas within internal storage will always be '
258 description=_('deltas within internal storage will always be '
597 'recalculated without reusing prior deltas; this will '
259 'recalculated without reusing prior deltas; this will '
598 'likely make execution run several times slower; this '
260 'likely make execution run several times slower; this '
599 'optimization is typically not needed'),
261 'optimization is typically not needed'),
600 upgrademessage=_('deltas within internal storage will be fully '
262 upgrademessage=_('deltas within internal storage will be fully '
601 'recomputed; this will likely drastically slow down '
263 'recomputed; this will likely drastically slow down '
602 'execution time')))
264 'execution time')))
603
265
604 return improvements
266 return improvements
605
267
606 def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
268 def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
607 optimize):
269 optimize):
608 """Determine upgrade actions that will be performed.
270 """Determine upgrade actions that will be performed.
609
271
610 Given a list of improvements as returned by ``upgradefindimprovements``,
272 Given a list of improvements as returned by ``upgradefindimprovements``,
611 determine the list of upgrade actions that will be performed.
273 determine the list of upgrade actions that will be performed.
612
274
613 The role of this function is to filter improvements if needed, apply
275 The role of this function is to filter improvements if needed, apply
614 recommended optimizations from the improvements list that make sense,
276 recommended optimizations from the improvements list that make sense,
615 etc.
277 etc.
616
278
617 Returns a list of action names.
279 Returns a list of action names.
618 """
280 """
619 newactions = []
281 newactions = []
620
282
621 knownreqs = upgradesupporteddestrequirements(repo)
283 knownreqs = upgradesupporteddestrequirements(repo)
622
284
623 for i in improvements:
285 for i in improvements:
624 name = i.name
286 name = i.name
625
287
626 # If the action is a requirement that doesn't show up in the
288 # If the action is a requirement that doesn't show up in the
627 # destination requirements, prune the action.
289 # destination requirements, prune the action.
628 if name in knownreqs and name not in destreqs:
290 if name in knownreqs and name not in destreqs:
629 continue
291 continue
630
292
631 if i.type == deficiency:
293 if i.type == deficiency:
632 newactions.append(name)
294 newactions.append(name)
633
295
634 newactions.extend(o for o in sorted(optimize) if o not in newactions)
296 newactions.extend(o for o in sorted(optimize) if o not in newactions)
635
297
636 # FUTURE consider adding some optimizations here for certain transitions.
298 # FUTURE consider adding some optimizations here for certain transitions.
637 # e.g. adding generaldelta could schedule parent redeltas.
299 # e.g. adding generaldelta could schedule parent redeltas.
638
300
639 return newactions
301 return newactions
640
302
641 def _revlogfrompath(repo, path):
303 def _revlogfrompath(repo, path):
642 """Obtain a revlog from a repo path.
304 """Obtain a revlog from a repo path.
643
305
644 An instance of the appropriate class is returned.
306 An instance of the appropriate class is returned.
645 """
307 """
646 if path == '00changelog.i':
308 if path == '00changelog.i':
647 return changelog.changelog(repo.svfs)
309 return changelog.changelog(repo.svfs)
648 elif path.endswith('00manifest.i'):
310 elif path.endswith('00manifest.i'):
649 mandir = path[:-len('00manifest.i')]
311 mandir = path[:-len('00manifest.i')]
650 return manifest.manifestrevlog(repo.svfs, dir=mandir)
312 return manifest.manifestrevlog(repo.svfs, dir=mandir)
651 else:
313 else:
652 # Filelogs don't do anything special with settings. So we can use a
314 # Filelogs don't do anything special with settings. So we can use a
653 # vanilla revlog.
315 # vanilla revlog.
654 return revlog.revlog(repo.svfs, path)
316 return revlog.revlog(repo.svfs, path)
655
317
656 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
318 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
657 """Copy revlogs between 2 repos."""
319 """Copy revlogs between 2 repos."""
658 revcount = 0
320 revcount = 0
659 srcsize = 0
321 srcsize = 0
660 srcrawsize = 0
322 srcrawsize = 0
661 dstsize = 0
323 dstsize = 0
662 fcount = 0
324 fcount = 0
663 frevcount = 0
325 frevcount = 0
664 fsrcsize = 0
326 fsrcsize = 0
665 frawsize = 0
327 frawsize = 0
666 fdstsize = 0
328 fdstsize = 0
667 mcount = 0
329 mcount = 0
668 mrevcount = 0
330 mrevcount = 0
669 msrcsize = 0
331 msrcsize = 0
670 mrawsize = 0
332 mrawsize = 0
671 mdstsize = 0
333 mdstsize = 0
672 crevcount = 0
334 crevcount = 0
673 csrcsize = 0
335 csrcsize = 0
674 crawsize = 0
336 crawsize = 0
675 cdstsize = 0
337 cdstsize = 0
676
338
677 # Perform a pass to collect metadata. This validates we can open all
339 # Perform a pass to collect metadata. This validates we can open all
678 # source files and allows a unified progress bar to be displayed.
340 # source files and allows a unified progress bar to be displayed.
679 for unencoded, encoded, size in srcrepo.store.walk():
341 for unencoded, encoded, size in srcrepo.store.walk():
680 if unencoded.endswith('.d'):
342 if unencoded.endswith('.d'):
681 continue
343 continue
682
344
683 rl = _revlogfrompath(srcrepo, unencoded)
345 rl = _revlogfrompath(srcrepo, unencoded)
684 revcount += len(rl)
346 revcount += len(rl)
685
347
686 datasize = 0
348 datasize = 0
687 rawsize = 0
349 rawsize = 0
688 idx = rl.index
350 idx = rl.index
689 for rev in rl:
351 for rev in rl:
690 e = idx[rev]
352 e = idx[rev]
691 datasize += e[1]
353 datasize += e[1]
692 rawsize += e[2]
354 rawsize += e[2]
693
355
694 srcsize += datasize
356 srcsize += datasize
695 srcrawsize += rawsize
357 srcrawsize += rawsize
696
358
697 # This is for the separate progress bars.
359 # This is for the separate progress bars.
698 if isinstance(rl, changelog.changelog):
360 if isinstance(rl, changelog.changelog):
699 crevcount += len(rl)
361 crevcount += len(rl)
700 csrcsize += datasize
362 csrcsize += datasize
701 crawsize += rawsize
363 crawsize += rawsize
702 elif isinstance(rl, manifest.manifestrevlog):
364 elif isinstance(rl, manifest.manifestrevlog):
703 mcount += 1
365 mcount += 1
704 mrevcount += len(rl)
366 mrevcount += len(rl)
705 msrcsize += datasize
367 msrcsize += datasize
706 mrawsize += rawsize
368 mrawsize += rawsize
707 elif isinstance(rl, revlog.revlog):
369 elif isinstance(rl, revlog.revlog):
708 fcount += 1
370 fcount += 1
709 frevcount += len(rl)
371 frevcount += len(rl)
710 fsrcsize += datasize
372 fsrcsize += datasize
711 frawsize += rawsize
373 frawsize += rawsize
712
374
713 if not revcount:
375 if not revcount:
714 return
376 return
715
377
716 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
378 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
717 '%d in changelog)\n') %
379 '%d in changelog)\n') %
718 (revcount, frevcount, mrevcount, crevcount))
380 (revcount, frevcount, mrevcount, crevcount))
719 ui.write(_('migrating %s in store; %s tracked data\n') % (
381 ui.write(_('migrating %s in store; %s tracked data\n') % (
720 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
382 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
721
383
722 # Used to keep track of progress.
384 # Used to keep track of progress.
723 progress = []
385 progress = []
724 def oncopiedrevision(rl, rev, node):
386 def oncopiedrevision(rl, rev, node):
725 progress[1] += 1
387 progress[1] += 1
726 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
388 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
727
389
    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)
        newrl = _revlogfrompath(dstrepo, unencoded)

        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
            ui.write(_('finished migrating %d manifest revisions across %d '
                       'manifests; change in size: %s\n') %
                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))

            ui.write(_('migrating changelog containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (crevcount, util.bytecount(csrcsize),
                      util.bytecount(crawsize)))
            seen.add('c')
            progress[:] = [_('changelog revisions'), 0, crevcount]
        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
            ui.write(_('finished migrating %d filelog revisions across %d '
                       'filelogs; change in size: %s\n') %
                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))

            ui.write(_('migrating %d manifests containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (mcount, mrevcount, util.bytecount(msrcsize),
                      util.bytecount(mrawsize)))
            seen.add('m')
            progress[:] = [_('manifest revisions'), 0, mrevcount]
        elif 'f' not in seen:
            ui.write(_('migrating %d filelogs containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (fcount, frevcount, util.bytecount(fsrcsize),
                      util.bytecount(frawsize)))
            seen.add('f')
            progress[:] = [_('file revisions'), 0, frevcount]

        ui.progress(progress[0], progress[1], total=progress[2])

        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
                    deltareuse=deltareuse,
                    aggressivemergedeltas=aggressivemergedeltas)

        datasize = 0
        idx = newrl.index
        for rev in newrl:
            datasize += idx[rev][1]

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    ui.progress(progress[0], None)

    ui.write(_('finished migrating %d changelog revisions; change in size: '
               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))

    ui.write(_('finished migrating %d total revisions; total change in store '
               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))

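# A minimal sketch (added for illustration; ``_storedsize`` is not part of
# this change) of the size accounting performed in the copy loop above:
# index entry [1] of a revlog is the compressed on-disk length of that
# revision's data, so summing it over all revisions gives the bytes the
# revlog consumes in the store.
def _storedsize(rl):
    """Return total compressed bytes stored by revlog ``rl`` (sketch)."""
    idx = rl.index
    return sum(idx[rev][1] for rev in rl)
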
def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
    """Determine whether to copy a store file during upgrade.

    This function is called when migrating store files from ``srcrepo`` to
    ``dstrepo`` as part of upgrading a repository.

    Args:
      srcrepo: repo we are copying from
      dstrepo: repo we are copying to
      requirements: set of requirements for ``dstrepo``
      path: store file being examined
      mode: the ``ST_MODE`` file type of ``path``
      st: ``stat`` data structure for ``path``

    Returns ``True`` if the file should be copied.
    """
    # Skip revlogs.
    if path.endswith(('.i', '.d')):
        return False
    # Skip transaction related files.
    if path.startswith('undo'):
        return False
    # Only copy regular files.
    if mode != stat.S_IFREG:
        return False
    # Skip other files that must not be carried over.
    if path in ('lock', 'fncache'):
        return False

    return True

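# A hedged usage sketch (``_demofilterstorefile`` is illustrative only, not
# part of this change): the predicate above is pure, so its behavior can be
# exercised directly. The module-level ``stat`` import already provides
# ``S_IFREG``, the file-type mode the store walker reports for regular files.
def _demofilterstorefile():
    cases = [
        ('data/foo.i', stat.S_IFREG, False),        # revlogs copied separately
        ('undo.backupfiles', stat.S_IFREG, False),  # transaction residue
        ('lock', stat.S_IFREG, False),              # explicitly skipped
        ('phaseroots', stat.S_IFREG, True),         # ordinary store file
    ]
    for path, mode, expected in cases:
        assert _upgradefilterstorefile(None, None, set(), path, mode,
                                       None) is expected, path
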
def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.
    """

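# Sketch of how a third-party extension might use this hook point. This is
# hypothetical extension code, shown only to illustrate the intended
# wrapping via the real ``extensions.wrapfunction`` API; it does not ship
# with this change:
#
#     from mercurial import extensions, upgrade
#
#     def _finishupgrade(orig, ui, srcrepo, dstrepo, requirements):
#         orig(ui, srcrepo, dstrepo, requirements)
#         ui.write('copying extension-specific caches\n')
#
#     def extsetup(ui):
#         extensions.wrapfunction(upgrade, '_upgradefinishdatamigration',
#                                 _finishupgrade)
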
def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.
    """
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.write(_('(it is safe to interrupt this process any time before '
               'data migration completes)\n'))

    if 'redeltaall' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif 'redeltaparent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 'redeltamultibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

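    # Added note: 'redeltaparent' and 'redeltamultibase' intentionally map to
    # the same DELTAREUSESAMEREVS mode; the multibase variant differs only in
    # passing aggressivemergedeltas=True to _copyrevlogs() below, making the
    # clone also consider deltas against both parents of merge revisions.
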
    with dstrepo.transaction('upgrade') as tr:
        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
                     'redeltamultibase' in actions)

        # Now copy other files in the store directory.
        for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
            if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
                                           p, kind, st):
                continue

            srcrepo.ui.write(_('copying %s\n') % p)
            src = srcrepo.store.vfs.join(p)
            dst = dstrepo.store.vfs.join(p)
            util.copyfile(src, dst, copystat=True)

        _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.write(_('data fully migrated to temporary repository\n'))

    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | set(['upgradeinprogress']))

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath

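# A self-contained sketch (standard library only; added for illustration,
# not part of this change) of the rename-based swap performed above. Each
# rename is a single directory operation on the same filesystem, so readers
# never observe a half-copied store; the only inconsistent window is the
# instant between the two renames.
def _demostoreswap(srcstore, newstore, backupdir):
    """Move ``srcstore`` into ``backupdir`` and promote ``newstore``."""
    import os
    backup = os.path.join(backupdir, 'store')
    os.rename(srcstore, backup)    # preserve the old store for rollback
    os.rename(newstore, srcstore)  # new store takes over the canonical path
    return backup
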
def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place."""
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    optimize = set(optimize or [])
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo)

    noremovereqs = (repo.requirements - newreqs -
                    upgradesupportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    noaddreqs = (newreqs - repo.requirements -
                 upgradeallowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

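    # At this point five set checks have passed: no mandatory source
    # requirement is missing, no blocked source requirement is present,
    # every requirement being dropped may be dropped, and every requirement
    # being added is both allowed and supported by the destination. For
    # example, upgrading a repo with requirements {'revlogv1', 'store'} to
    # newreqs {'revlogv1', 'store', 'fncache'} only needs the 'fncache'
    # addition vetted by the last two checks.
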
    # Find and validate all improvements that can be made.
    improvements = upgradefindimprovements(repo)
    for i in improvements:
        if i.type not in (deficiency, optimisation):
            raise error.Abort(_('unexpected improvement type %s for %s') % (
                i.type, i.name))

    # Validate arguments.
    unknownoptimize = optimize - set(i.name for i in improvements
                                     if i.type == optimisation)
    if unknownoptimize:
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(unknownoptimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    actions = upgradedetermineactions(repo, improvements, repo.requirements,
                                      newreqs, optimize)

    def printrequirements():
        ui.write(_('requirements\n'))
        ui.write(_('   preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_('   removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_('   added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        for action in actions:
            for i in improvements:
                if i.name == action:
                    ui.write('%s\n   %s\n\n' %
                             (i.name, i.upgrademessage))

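    # Without --run this is a dry run: report feature deficiencies, the
    # planned requirement changes and actions, plus any optimizations not
    # selected, then return without touching the repository.
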
    if not run:
        fromdefault = []
        fromconfig = []
        optimizations = []

        for i in improvements:
            assert i.type in (deficiency, optimisation)
            if i.type == deficiency:
                if i.fromdefault:
                    fromdefault.append(i)
                if i.fromconfig:
                    fromconfig.append(i)
            else:
                optimizations.append(i)

        if fromdefault or fromconfig:
            fromconfignames = set(x.name for x in fromconfig)
            onlydefault = [i for i in fromdefault
                           if i.name not in fromconfignames]

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in improvements
                          if i.name not in actions and i.type == optimisation]
        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock():
        with repo.lock():
            ui.write(_('repository locked and read-only\n'))
            # Our strategy for upgrading the repository is to create a new,
            # temporary repository, write data to it, then do a swap of the
            # data. There are less heavyweight ways to do this, but it is
            # easier to create a new repo object than to instantiate all the
            # components (like the store) separately.
            tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
            backuppath = None
            try:
                ui.write(_('creating temporary repository to stage migrated '
                           'data: %s\n') % tmppath)
                dstrepo = localrepo.localrepository(repo.baseui,
                                                    path=tmppath,
                                                    create=True)

                with dstrepo.wlock():
                    with dstrepo.lock():
                        backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                                  actions)

            finally:
                ui.write(_('removing temporary repository %s\n') % tmppath)
                repo.vfs.rmtree(tmppath, forcibly=True)

            if backuppath:
                ui.warn(_('copy of old repository backed up at %s\n') %
                        backuppath)
                ui.warn(_('the old repository will not be deleted; remove '
                          'it to free up disk space once the upgraded '
                          'repository is verified\n'))
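
# For context, a hedged sketch of how the debug command layer is expected to
# delegate to this entry point after the extraction (option wiring here is
# illustrative, not a copy; the actual registration lives in
# debugcommands.py in this changeset):
#
#     @command('debugupgraderepo', [
#         ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
#         ('', 'run', False, _('performs an upgrade')),
#     ])
#     def debugupgraderepo(ui, repo, run=False, **opts):
#         return upgrade.upgraderepo(ui, repo, run=run,
#                                    optimize=opts['optimize'])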