obsutil: move 'getmarkers' to the new modules...
marmoute
r33150:a14e2e7f default
@@ -1,2245 +1,2245 @@
# debugcommands.py - command processing for debug* commands
#
# Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import difflib
import errno
import operator
import os
import random
import socket
import string
import sys
import tempfile
import time

from .i18n import _
from .node import (
    bin,
    hex,
    nullhex,
    nullid,
    nullrev,
    short,
)
from . import (
    bundle2,
    changegroup,
    cmdutil,
    color,
    context,
    dagparser,
    dagutil,
    encoding,
    error,
    exchange,
    extensions,
    filemerge,
    fileset,
    formatter,
    hg,
    localrepo,
    lock as lockmod,
    merge as mergemod,
    obsolete,
    obsutil,
    phases,
    policy,
    pvec,
    pycompat,
    registrar,
    repair,
    revlog,
    revset,
    revsetlang,
    scmutil,
    setdiscovery,
    simplemerge,
    smartset,
    sslutil,
    streamclone,
    templater,
    treediscovery,
    upgrade,
    util,
    vfs as vfsmod,
)

release = lockmod.release

command = registrar.command()

@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        index, rev1, rev2 = args
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (r.rev(a), hex(a)))

@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.apply(repo)

@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
     ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
     ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string-valued elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # determine number of revs in DAG
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []

    wlock = lock = tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction("builddag")

        at = -1
        atbranch = 'default'
        nodeids = []
        id = 0
        ui.progress(_('building'), id, unit=_('revisions'), total=total)
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % str(data)))
                id, ps = data

                files = []
                fctxs = {}

                p2 = None
                if mergeable_file:
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, mergedtext)

                if overwritten_file:
                    fn = "of"
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)

                if new_file:
                    fn = "nf%i" % id
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                fctxs[fn] = p2[fn]

                def fctxfn(repo, cx, path):
                    return fctxs.get(path)

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                ui.note(('branch %s\n' % data))
                atbranch = data
            ui.progress(_('building'), id, unit=_('revisions'), total=total)
        tr.close()

        if tags:
            repo.vfs.write("localtags", "".join(tags))
    finally:
        ui.progress(_('building'), None)
        release(tr, lock, wlock)

def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            ui.write("\n%s%s\n" % (indent_string, named))
            chain = None
            for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']
                ui.write("%s%s %s %s %s %s %s\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))
                chain = node

        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        chunkdata = gen.changelogheader()
        chain = None
        for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
            node = chunkdata['node']
            ui.write("%s%s\n" % (indent_string, hex(node)))
            chain = node

def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = ' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        msg = "%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = "%sversion: %s (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        fm = ui.formatter('debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()

def _debugphaseheads(ui, data, indent=0):
    """display phase heads contained in 'data'"""
    indent_string = ' ' * indent
    headsbyphase = bundle2._readphaseheads(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write(indent_string)
            ui.write('%s %s\n' % (hex(head), phases.phasenames[phase]))

def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % repr(gen.params)))
    parttypes = opts.get(r'part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        ui.write('%s -- %r\n' % (part.type, repr(part.params)))
        if part.type == 'changegroup':
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == 'obsmarkers':
            _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == 'phase-heads':
            _debugphaseheads(ui, part, indent=4)

@command('debugbundle',
    [('a', 'all', None, _('show all details')),
     ('', 'part-type', [], _('show only the named part type')),
     ('', 'spec', None, _('print the bundlespec of the bundle'))],
    _('FILE'),
    norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            spec = exchange.getbundlespec(ui, f)
            ui.write('%s\n' % spec)
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)

@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)

@command('debugcolor',
    [('', 'style', None, _('show all configured styles'))],
    'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available colors, effects or styles"""
    ui.write(('color mode: %s\n') % ui._colormode)
    if opts.get(r'style'):
        return _debugdisplaystyle(ui)
    else:
        return _debugdisplaycolor(ui)

def _debugdisplaycolor(ui):
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for k, v in ui.configitems('color'):
            if k.startswith('color.'):
                ui._styles[k] = k[6:]
            elif k.startswith('terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_('available colors:\n'))
    # sort labels with '_' after the others to group the '_background'
    # entries together
    items = sorted(ui._styles.items(),
                   key=lambda i: ('_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(('%s\n') % colorname, label=label)

def _debugdisplaystyle(ui):
    ui.write(_('available style:\n'))
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            # 50
            ui.write(': ')
            ui.write(' ' * (max(0, width - len(label))))
            ui.write(', '.join(ui.label(e, e) for e in effects.split()))
        ui.write('\n')

@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))

@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
     ('b', 'branches', None, _('annotate with branch names')),
     ('', 'dots', None, _('use dots for runs')),
     ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get(r'spaces')
    dots = opts.get(r'dots')
    if file_:
        rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        cl = repo.changelog
        tags = opts.get(r'tags')
        branches = opts.get(r'branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
        ui.write("\n")

@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
    try:
        ui.write(r.revision(r.lookup(rev), raw=True))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)

@command('debugdate',
    [('e', 'extended', None, _('try extended date formats'))],
    _('[-e] DATE [RANGE]'),
    norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts[r"extended"]:
        d = util.parsedate(date, util.extendeddateformats)
    else:
        d = util.parsedate(date)
    ui.write(("internal: %s %s\n") % d)
    ui.write(("standard: %s\n") % util.datestr(d))
    if range:
        m = util.matchdate(range)
        ui.write(("match: %s\n") % m(d[0]))

@command('debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    generaldelta = r.version & revlog.FLAG_GENERALDELTA

    def revinfo(rev):
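        # index entry fields used below (assumed revlogng tuple layout):
        # e[1] compressed length, e[2] uncompressed length, e[3] base or
        # delta-parent revision, e[5]/e[6] first/second parent revisions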
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    fm.plain('    rev  chain# chainlen     prev   delta       '
             'size    rawsize  chainsize     ratio   lindist extradist '
             'extraratio\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = r.start(chainbase)
        revstart = r.start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        chainratio = float(chainsize) / float(uncomp)
        extraratio = float(extradist) / float(chainsize)

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)

    fm.end()

@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime')),
     ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = opts.get(r'nodates')
    datesort = opts.get(r'datesort')

    timestr = ""
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
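    # each dirstate entry is a tuple of (state, mode, size, mtime) -- the
    # layout assumed by the indexing below; ent[3] == -1 marks an unset mtime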
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            timestr = 'unset               '
        elif nodates:
            timestr = 'set                 '
        else:
            timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
        if ent[1] & 0o20000:
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))

@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
     ('', 'nonheads', None,
      _('use old-style discovery with non-heads included')),
    ] + cmdutil.remoteopts,
    _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
                                      opts.get('branch'))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(localheads, remoteheads, remote=remote):
        if opts.get('old'):
            if localheads:
                raise error.Abort('cannot use localheads with old style '
                                  'discovery')
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))
                dag = dagutil.revlogdag(repo.changelog)
                all = dag.ancestorset(dag.internalizeall(common))
                common = dag.externalizeall(dag.headsetofconnecteds(all))
        else:
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))

    serverlogs = opts.get('serverlog')
    if serverlogs:
        for filename in serverlogs:
            with open(filename, 'r') as logfile:
                line = logfile.readline()
                while line:
                    parts = line.strip().split(';')
                    op = parts[1]
                    if op == 'cg':
                        pass
                    elif op == 'cgss':
                        doit(parts[2].split(' '), parts[3].split(' '))
                    elif op == 'unb':
                        doit(parts[3].split(' '), parts[2].split(' '))
                    line = logfile.readline()
    else:
        remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
                                                 opts.get('remote_head'))
        localrevs = opts.get('local_head')
        doit(localrevs, remoterevs)

@command('debugextensions', cmdutil.formatteropts, [], norepo=True)
def debugextensions(ui, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        fm.condwrite(ui.verbose and extsource, 'source',
                     _('  location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_('  bundled: %s\n') % ['no', 'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _('  tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _('  bug reporting: %s\n'), extbuglink or "")

    fm.end()

@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
    _('[-r REV] FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    ctx = scmutil.revsingle(repo, opts.get(r'rev'), None)
    if ui.verbose:
        tree = fileset.parse(expr)
        ui.note(fileset.prettyformat(tree), "\n")

    for f in ctx.getfileset(expr):
        ui.write("%s\n" % f)

@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
    ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
    casesensitive = '(unknown)'
    try:
        with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)

@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
     ('C', 'common', [], _('id of common node'), _('ID')),
     ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    args = {}
    if common:
        args[r'common'] = [bin(s) for s in common]
    if head:
        args[r'heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args[r'bundlecaps'] = None
    bundle = repo.getbundle('debug', **args)

    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)

@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
863 ignore = repo.dirstate._ignore
863 ignore = repo.dirstate._ignore
864 if not files:
864 if not files:
865 # Show all the patterns
865 # Show all the patterns
866 ui.write("%s\n" % repr(ignore))
866 ui.write("%s\n" % repr(ignore))
867 else:
867 else:
868 for f in files:
868 for f in files:
869 nf = util.normpath(f)
869 nf = util.normpath(f)
870 ignored = None
870 ignored = None
871 ignoredata = None
871 ignoredata = None
872 if nf != '.':
872 if nf != '.':
873 if ignore(nf):
873 if ignore(nf):
874 ignored = nf
874 ignored = nf
875 ignoredata = repo.dirstate._ignorefileandline(nf)
875 ignoredata = repo.dirstate._ignorefileandline(nf)
876 else:
876 else:
877 for p in util.finddirs(nf):
877 for p in util.finddirs(nf):
878 if ignore(p):
878 if ignore(p):
879 ignored = p
879 ignored = p
880 ignoredata = repo.dirstate._ignorefileandline(p)
880 ignoredata = repo.dirstate._ignorefileandline(p)
881 break
881 break
882 if ignored:
882 if ignored:
883 if ignored == nf:
883 if ignored == nf:
884 ui.write(_("%s is ignored\n") % f)
884 ui.write(_("%s is ignored\n") % f)
885 else:
885 else:
886 ui.write(_("%s is ignored because of "
886 ui.write(_("%s is ignored because of "
887 "containing folder %s\n")
887 "containing folder %s\n")
888 % (f, ignored))
888 % (f, ignored))
889 ignorefile, lineno, line = ignoredata
889 ignorefile, lineno, line = ignoredata
890 ui.write(_("(ignore rule in %s, line %d: '%s')\n")
890 ui.write(_("(ignore rule in %s, line %d: '%s')\n")
891 % (ignorefile, lineno, line))
891 % (ignorefile, lineno, line))
892 else:
892 else:
893 ui.write(_("%s is not ignored\n") % f)
893 ui.write(_("%s is not ignored\n") % f)
894
894
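# Illustration (hypothetical file and rule, derived from the ui.write formats
# above; the real rule and line depend on your .hgignore):
#
#   $ hg debugignore build/output.o
#   build/output.o is ignored
#   (ignore rule in .hgignore, line 3: 'build')
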
@command('debugindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugindex(ui, repo, file_=None, **opts):
    """dump the contents of an index file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    if generaldelta:
        basehdr = ' delta'
    else:
        basehdr = '  base'

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        ui.write(("   rev    offset  length " + basehdr + " linkrev"
                  " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        ui.write(("   rev flag   offset   length"
                  "     size " + basehdr + "   link     p1     p2"
                  " %s\n") % "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if generaldelta:
            base = r.deltaparent(i)
        else:
            base = r.chainbase(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [nullid, nullid]
            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), base, r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    base, r.linkrev(i), pr[0], pr[1], shortfn(node)))

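# Illustrative only: dump the changelog index in the default format 0, or a
# filelog index by path (both forms follow the -c|-m|FILE synopsis above):
#
#   $ hg debugindex -c
#   $ hg debugindex path/to/file.txt
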
@command('debugindexdot', cmdutil.debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != nullid:
            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write("}\n")

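# Illustrative pipeline (assumes graphviz's "dot" is installed): render the
# DAG emitted above as an image.
#
#   $ hg debugindexdot -m | dot -Tpng -o dag.png
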
@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    def writetemp(contents):
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, pycompat.sysstr("wb"))
        f.write(contents)
        f.close()
        return name

    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        encoding.fromlocal("test")
    except error.Abort as inst:
        err = inst
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable)
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_(' TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_(' SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    if policy.policy in ('c', 'allow'):
        err = None
        try:
            from .cext import (
                base85,
                bdiff,
                mpatch,
                osutil,
            )
            dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
        except Exception as inst:
            err = inst
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = inst
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    fm.write('editor', _("checking commit editor... (%s)\n"), editor)
    cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editor)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems

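# Illustration: the formatter options accepted above (cmdutil.formatteropts)
# allow machine-readable output, e.g.:
#
#   $ hg debuginstall -T json
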
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('known'):
        raise error.Abort("known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))

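# Illustration (hypothetical repository path and placeholder nodes): the
# output is one digit per queried ID, in order, e.g. "10" for known-then-
# unknown.
#
#   $ hg debugknown /path/to/repo <40-hex-node> <40-hex-node>
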
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)

@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Returns 0 if no locks are held.

    """

    if opts.get(r'force_lock'):
        repo.svfs.unlink('lock')
    if opts.get(r'force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
        return 0

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                stat = vfs.lstat(name)
                age = now - stat.st_mtime
                user = util.username(stat.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user, pid)
                    else:
                        locker = 'user %s, process %s, host %s' \
                                 % (user, pid, host)
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held

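# Illustration: with no options the command only reports (e.g. "lock:  free");
# -L/-W force-free the store/working-state locks (DANGEROUS, per the help):
#
#   $ hg debuglocks
#   $ hg debuglocks -L
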
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        if h == nullhex:
            return 'null'
        else:
            return h

    def printrecords(version):
        ui.write(('* version %s records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write(('  ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write(('  other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write(('  local: %s\n' % labels[0]))
                ui.write(('  other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write(('  base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since
    # this command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
    if ui.verbose:
        printrecords(2)

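# Illustration: run during an interrupted merge; --verbose additionally
# reports which merge state version (v1 or v2) was chosen, per the docstring:
#
#   $ hg debugmergestate -v
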
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in repo.names.iteritems():
        if name != 'branches':
            names.update(ns.listnames(repo))
    names.update(tag for (tag, heads, tip, closed)
                 in repo.branchmap().iterbranches() if not closed)
    completions = set()
    if not args:
        args = ['']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write('\n'.join(sorted(completions)))
    ui.write('\n')

@command('debugobsolete',
        [('', 'flags', 0, _('markers flag')),
         ('', 'record-parents', False,
          _('record parent information for the precursor')),
         ('r', 'rev', [], _('display markers relevant to REV')),
         ('', 'exclusive', False, _('restrict display to markers only '
                                    'relevant to REV')),
         ('', 'index', False, _('display index of the marker')),
         ('', 'delete', [], _('delete markers specified by indices')),
        ] + cmdutil.commitopts2 + cmdutil.formatteropts,
         _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = opts['user'] or ui.username()
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = util.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot use --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata, ui=ui)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') % exc)
            finally:
                tr.release()
        finally:
            l.release()
    else:
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsutil.getmarkers(repo, nodes=nodes,
                                              exclusive=opts['exclusive']))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()

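# Illustration (placeholder full hex nodes): record that changeset OLD was
# rewritten into NEW, or list markers relevant to a revision with their index:
#
#   $ hg debugobsolete <old-40-hex-node> <new-40-hex-node>
#   $ hg debugobsolete --rev . --index
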
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts[r'full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    acceptable = ''
    if opts[r'normal']:
        acceptable += 'nm'
    if opts[r'added']:
        acceptable += 'a'
    if opts[r'removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')

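# Illustration (hypothetical prefix): complete tracked paths starting with
# "src/ut", stopping at the next path segment unless --full is given:
#
#   $ hg debugpathcomplete src/ut
#   $ hg debugpathcomplete --full src/ut
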
@command('debugpickmergetool',
        [('r', 'rev', '', _('check for files in this revision'), _('REV')),
         ('', 'changedelete', None, _('emulate merging change and delete')),
        ] + cmdutil.walkopts + cmdutil.mergetoolopts,
        _('[PATTERN]...'),
        inferrepo=True)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for the specified file

    As described in :hg:`help merge-tools`, Mercurial examines the
    configurations below in this order to decide which merge tool is
    chosen for the specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reasons only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out the examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actually updating to it.

    With --debug, this command also shows warning messages while
    matching against ``merge-patterns`` and so on. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases the amount of output per file
    according to configurations in hgrc.

    With -v/--verbose, this command shows the configurations below
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If a merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such a case, the information
    above is useful to know why a merge tool was chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts['tool']:
        overrides[('ui', 'forcemerge')] = opts['tool']
        ui.note(('with --tool %r\n') % (opts['tool']))

    with ui.configoverride(overrides, 'debugmergepatterns'):
        hgmerge = encoding.environ.get("HGMERGE")
        if hgmerge is not None:
            ui.note(('with HGMERGE=%r\n') % (hgmerge))
        uimerge = ui.config("ui", "merge")
        if uimerge:
            ui.note(('with ui.merge=%r\n') % (uimerge))

        ctx = scmutil.revsingle(repo, opts.get('rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts['changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(repo, ui, path,
                                                     fctx.isbinary(),
                                                     'l' in fctx.flags(),
                                                     changedelete)
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(('%s = %s\n') % (path, tool))

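# Illustration (hypothetical pattern): examine the tool chosen for every C
# file at a revision without updating to it; output follows the
# "FILE = MERGETOOL" style described in the docstring above:
#
#   $ hg debugpickmergetool -r tip 'glob:**.c'
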
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if keyinfo:
        key, old, new = keyinfo
        r = target.pushkey(namespace, key, old, new)
        ui.status(str(r) + '\n')
        return not r
    else:
        for k, v in sorted(target.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (util.escapestr(k),
                                   util.escapestr(v)))

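# Illustration (hypothetical repository path): two args list a namespace,
# five args attempt a conditional update, as the docstring above describes:
#
#   $ hg debugpushkey /path/to/repo bookmarks
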
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))

@command('debugrebuilddirstate|debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                             'the working copy parent')),
    ],
    _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look for the given revision

    If no revision is specified, the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to
    be tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get(r'minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
            changedfiles = manifestonly | dsnotadded

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)

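# Illustration: reset the dirstate to the working copy parent so the next
# "hg status" re-checks actual file content (see the docstring above):
#
#   $ hg debugrebuilddirstate -r .
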
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    repair.rebuildfncache(ui, repo)

@command('debugrename',
    [('r', 'rev', '', _('revision to debug'), _('REV'))],
    _('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    m = scmutil.match(ctx, (file1,) + pats, opts)
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        o = fctx.filelog().renamed(fctx.filenode())
        rel = m.rel(abs)
        if o:
            ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
        else:
            ui.write(_("%s not renamed\n") % rel)

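# Illustration (hypothetical file): report where a tracked file was renamed
# from, in "FILE renamed from OLD:NODE" form per the ui.write above:
#
#   $ hg debugrename -r tip src/new-name.c
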
@command('debugrevlog', cmdutil.debugrevlogopts +
    [('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    if opts.get("dump"):
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
                  " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append('inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    nummerges = 0
    numfull = 0
    numprev = 0
    nump1 = 0
    nump2 = 0
    numother = 0
    nump1prev = 0
    nump2prev = 0
    chainlengths = []
    chainbases = []
    chainspans = []

    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in xrange(numrevs):
1785 for rev in xrange(numrevs):
1786 p1, p2 = r.parentrevs(rev)
1786 p1, p2 = r.parentrevs(rev)
1787 delta = r.deltaparent(rev)
1787 delta = r.deltaparent(rev)
1788 if format > 0:
1788 if format > 0:
1789 addsize(r.rawsize(rev), datasize)
1789 addsize(r.rawsize(rev), datasize)
1790 if p2 != nullrev:
1790 if p2 != nullrev:
1791 nummerges += 1
1791 nummerges += 1
1792 size = r.length(rev)
1792 size = r.length(rev)
1793 if delta == nullrev:
1793 if delta == nullrev:
1794 chainlengths.append(0)
1794 chainlengths.append(0)
1795 chainbases.append(r.start(rev))
1795 chainbases.append(r.start(rev))
1796 chainspans.append(size)
1796 chainspans.append(size)
1797 numfull += 1
1797 numfull += 1
1798 addsize(size, fullsize)
1798 addsize(size, fullsize)
1799 else:
1799 else:
1800 chainlengths.append(chainlengths[delta] + 1)
1800 chainlengths.append(chainlengths[delta] + 1)
1801 baseaddr = chainbases[delta]
1801 baseaddr = chainbases[delta]
1802 revaddr = r.start(rev)
1802 revaddr = r.start(rev)
1803 chainbases.append(baseaddr)
1803 chainbases.append(baseaddr)
1804 chainspans.append((revaddr - baseaddr) + size)
1804 chainspans.append((revaddr - baseaddr) + size)
1805 addsize(size, deltasize)
1805 addsize(size, deltasize)
1806 if delta == rev - 1:
1806 if delta == rev - 1:
1807 numprev += 1
1807 numprev += 1
1808 if delta == p1:
1808 if delta == p1:
1809 nump1prev += 1
1809 nump1prev += 1
1810 elif delta == p2:
1810 elif delta == p2:
1811 nump2prev += 1
1811 nump2prev += 1
1812 elif delta == p1:
1812 elif delta == p1:
1813 nump1 += 1
1813 nump1 += 1
1814 elif delta == p2:
1814 elif delta == p2:
1815 nump2 += 1
1815 nump2 += 1
1816 elif delta != nullrev:
1816 elif delta != nullrev:
1817 numother += 1
1817 numother += 1
1818
1818
1819 # Obtain data on the raw chunks in the revlog.
1819 # Obtain data on the raw chunks in the revlog.
1820 segment = r._getsegmentforrevs(rev, rev)[1]
1820 segment = r._getsegmentforrevs(rev, rev)[1]
1821 if segment:
1821 if segment:
1822 chunktype = bytes(segment[0:1])
1822 chunktype = bytes(segment[0:1])
1823 else:
1823 else:
1824 chunktype = 'empty'
1824 chunktype = 'empty'
1825
1825
1826 if chunktype not in chunktypecounts:
1826 if chunktype not in chunktypecounts:
1827 chunktypecounts[chunktype] = 0
1827 chunktypecounts[chunktype] = 0
1828 chunktypesizes[chunktype] = 0
1828 chunktypesizes[chunktype] = 0
1829
1829
1830 chunktypecounts[chunktype] += 1
1830 chunktypecounts[chunktype] += 1
1831 chunktypesizes[chunktype] += size
1831 chunktypesizes[chunktype] += size
1832
1832
1833 # Adjust size min value for empty cases
1833 # Adjust size min value for empty cases
1834 for size in (datasize, fullsize, deltasize):
1834 for size in (datasize, fullsize, deltasize):
1835 if size[0] is None:
1835 if size[0] is None:
1836 size[0] = 0
1836 size[0] = 0
1837
1837
1838 numdeltas = numrevs - numfull
1838 numdeltas = numrevs - numfull
1839 numoprev = numprev - nump1prev - nump2prev
1839 numoprev = numprev - nump1prev - nump2prev
1840 totalrawsize = datasize[2]
1840 totalrawsize = datasize[2]
1841 datasize[2] /= numrevs
1841 datasize[2] /= numrevs
1842 fulltotal = fullsize[2]
1842 fulltotal = fullsize[2]
1843 fullsize[2] /= numfull
1843 fullsize[2] /= numfull
1844 deltatotal = deltasize[2]
1844 deltatotal = deltasize[2]
1845 if numrevs - numfull > 0:
1845 if numrevs - numfull > 0:
1846 deltasize[2] /= numrevs - numfull
1846 deltasize[2] /= numrevs - numfull
1847 totalsize = fulltotal + deltatotal
1847 totalsize = fulltotal + deltatotal
1848 avgchainlen = sum(chainlengths) / numrevs
1848 avgchainlen = sum(chainlengths) / numrevs
1849 maxchainlen = max(chainlengths)
1849 maxchainlen = max(chainlengths)
1850 maxchainspan = max(chainspans)
1850 maxchainspan = max(chainspans)
1851 compratio = 1
1851 compratio = 1
1852 if totalsize:
1852 if totalsize:
1853 compratio = totalrawsize / totalsize
1853 compratio = totalrawsize / totalsize
1854
1854
1855 basedfmtstr = '%%%dd\n'
1855 basedfmtstr = '%%%dd\n'
1856 basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
1856 basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
1857
1857
1858 def dfmtstr(max):
1858 def dfmtstr(max):
1859 return basedfmtstr % len(str(max))
1859 return basedfmtstr % len(str(max))
1860 def pcfmtstr(max, padding=0):
1860 def pcfmtstr(max, padding=0):
1861 return basepcfmtstr % (len(str(max)), ' ' * padding)
1861 return basepcfmtstr % (len(str(max)), ' ' * padding)
1862
1862
1863 def pcfmt(value, total):
1863 def pcfmt(value, total):
1864 if total:
1864 if total:
1865 return (value, 100 * float(value) / total)
1865 return (value, 100 * float(value) / total)
1866 else:
1866 else:
1867 return value, 100.0
1867 return value, 100.0
1868
1868
1869 ui.write(('format : %d\n') % format)
1869 ui.write(('format : %d\n') % format)
1870 ui.write(('flags : %s\n') % ', '.join(flags))
1870 ui.write(('flags : %s\n') % ', '.join(flags))
1871
1871
1872 ui.write('\n')
1872 ui.write('\n')
1873 fmt = pcfmtstr(totalsize)
1873 fmt = pcfmtstr(totalsize)
1874 fmt2 = dfmtstr(totalsize)
1874 fmt2 = dfmtstr(totalsize)
1875 ui.write(('revisions : ') + fmt2 % numrevs)
1875 ui.write(('revisions : ') + fmt2 % numrevs)
1876 ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
1876 ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
1877 ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
1877 ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
1878 ui.write(('revisions : ') + fmt2 % numrevs)
1878 ui.write(('revisions : ') + fmt2 % numrevs)
1879 ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
1879 ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
1880 ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
1880 ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
1881 ui.write(('revision size : ') + fmt2 % totalsize)
1881 ui.write(('revision size : ') + fmt2 % totalsize)
1882 ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
1882 ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
1883 ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))
1883 ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))
1884
1884
1885 def fmtchunktype(chunktype):
1885 def fmtchunktype(chunktype):
1886 if chunktype == 'empty':
1886 if chunktype == 'empty':
1887 return ' %s : ' % chunktype
1887 return ' %s : ' % chunktype
1888 elif chunktype in pycompat.bytestr(string.ascii_letters):
1888 elif chunktype in pycompat.bytestr(string.ascii_letters):
1889 return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
1889 return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
1890 else:
1890 else:
1891 return ' 0x%s : ' % hex(chunktype)
1891 return ' 0x%s : ' % hex(chunktype)
1892
1892
1893 ui.write('\n')
1893 ui.write('\n')
1894 ui.write(('chunks : ') + fmt2 % numrevs)
1894 ui.write(('chunks : ') + fmt2 % numrevs)
1895 for chunktype in sorted(chunktypecounts):
1895 for chunktype in sorted(chunktypecounts):
1896 ui.write(fmtchunktype(chunktype))
1896 ui.write(fmtchunktype(chunktype))
1897 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
1897 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
1898 ui.write(('chunks size : ') + fmt2 % totalsize)
1898 ui.write(('chunks size : ') + fmt2 % totalsize)
1899 for chunktype in sorted(chunktypecounts):
1899 for chunktype in sorted(chunktypecounts):
1900 ui.write(fmtchunktype(chunktype))
1900 ui.write(fmtchunktype(chunktype))
1901 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
1901 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
1902
1902
1903 ui.write('\n')
1903 ui.write('\n')
1904 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
1904 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
1905 ui.write(('avg chain length : ') + fmt % avgchainlen)
1905 ui.write(('avg chain length : ') + fmt % avgchainlen)
1906 ui.write(('max chain length : ') + fmt % maxchainlen)
1906 ui.write(('max chain length : ') + fmt % maxchainlen)
1907 ui.write(('max chain reach : ') + fmt % maxchainspan)
1907 ui.write(('max chain reach : ') + fmt % maxchainspan)
1908 ui.write(('compression ratio : ') + fmt % compratio)
1908 ui.write(('compression ratio : ') + fmt % compratio)
1909
1909
1910 if format > 0:
1910 if format > 0:
1911 ui.write('\n')
1911 ui.write('\n')
1912 ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
1912 ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
1913 % tuple(datasize))
1913 % tuple(datasize))
1914 ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
1914 ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
1915 % tuple(fullsize))
1915 % tuple(fullsize))
1916 ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
1916 ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
1917 % tuple(deltasize))
1917 % tuple(deltasize))
1918
1918
1919 if numdeltas > 0:
1919 if numdeltas > 0:
1920 ui.write('\n')
1920 ui.write('\n')
1921 fmt = pcfmtstr(numdeltas)
1921 fmt = pcfmtstr(numdeltas)
1922 fmt2 = pcfmtstr(numdeltas, 4)
1922 fmt2 = pcfmtstr(numdeltas, 4)
1923 ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
1923 ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
1924 if numprev > 0:
1924 if numprev > 0:
1925 ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
1925 ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
1926 numprev))
1926 numprev))
1927 ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
1927 ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
1928 numprev))
1928 numprev))
1929 ui.write((' other : ') + fmt2 % pcfmt(numoprev,
1929 ui.write((' other : ') + fmt2 % pcfmt(numoprev,
1930 numprev))
1930 numprev))
1931 if gdelta:
1931 if gdelta:
1932 ui.write(('deltas against p1 : ')
1932 ui.write(('deltas against p1 : ')
1933 + fmt % pcfmt(nump1, numdeltas))
1933 + fmt % pcfmt(nump1, numdeltas))
1934 ui.write(('deltas against p2 : ')
1934 ui.write(('deltas against p2 : ')
1935 + fmt % pcfmt(nump2, numdeltas))
1935 + fmt % pcfmt(nump2, numdeltas))
1936 ui.write(('deltas against other : ') + fmt % pcfmt(numother,
1936 ui.write(('deltas against other : ') + fmt % pcfmt(numother,
1937 numdeltas))
1937 numdeltas))
1938
1938
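# Editor's note: the "compression ratio" printed above is total uncompressed
# size over total stored size. A toy computation with hypothetical numbers:
#
#   totalrawsize = 1200    # sum of uncompressed revision sizes
#   totalsize = 400        # bytes of full snapshots + deltas on disk
#   compratio = totalrawsize / totalsize   # -> 3, i.e. 3x compression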
1939 @command('debugrevspec',
1939 @command('debugrevspec',
1940 [('', 'optimize', None,
1940 [('', 'optimize', None,
1941 _('print parsed tree after optimizing (DEPRECATED)')),
1941 _('print parsed tree after optimizing (DEPRECATED)')),
1942 ('', 'show-revs', True, _('print list of result revisions (default)')),
1942 ('', 'show-revs', True, _('print list of result revisions (default)')),
1943 ('s', 'show-set', None, _('print internal representation of result set')),
1943 ('s', 'show-set', None, _('print internal representation of result set')),
1944 ('p', 'show-stage', [],
1944 ('p', 'show-stage', [],
1945 _('print parsed tree at the given stage'), _('NAME')),
1945 _('print parsed tree at the given stage'), _('NAME')),
1946 ('', 'no-optimized', False, _('evaluate tree without optimization')),
1946 ('', 'no-optimized', False, _('evaluate tree without optimization')),
1947 ('', 'verify-optimized', False, _('verify optimized result')),
1947 ('', 'verify-optimized', False, _('verify optimized result')),
1948 ],
1948 ],
1949 ('REVSPEC'))
1949 ('REVSPEC'))
1950 def debugrevspec(ui, repo, expr, **opts):
1950 def debugrevspec(ui, repo, expr, **opts):
1951 """parse and apply a revision specification
1951 """parse and apply a revision specification
1952
1952
1953 Use -p/--show-stage option to print the parsed tree at the given stages.
1953 Use -p/--show-stage option to print the parsed tree at the given stages.
1954 Use -p all to print tree at every stage.
1954 Use -p all to print tree at every stage.
1955
1955
1956 Use --no-show-revs option with -s or -p to print only the set
1956 Use --no-show-revs option with -s or -p to print only the set
1957 representation or the parsed tree respectively.
1957 representation or the parsed tree respectively.
1958
1958
1959 Use --verify-optimized to compare the optimized result with the unoptimized
1959 Use --verify-optimized to compare the optimized result with the unoptimized
1960 one. Returns 1 if the optimized result differs.
1960 one. Returns 1 if the optimized result differs.
1961 """
1961 """
1962 opts = pycompat.byteskwargs(opts)
1962 opts = pycompat.byteskwargs(opts)
1963 stages = [
1963 stages = [
1964 ('parsed', lambda tree: tree),
1964 ('parsed', lambda tree: tree),
1965 ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)),
1965 ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)),
1966 ('concatenated', revsetlang.foldconcat),
1966 ('concatenated', revsetlang.foldconcat),
1967 ('analyzed', revsetlang.analyze),
1967 ('analyzed', revsetlang.analyze),
1968 ('optimized', revsetlang.optimize),
1968 ('optimized', revsetlang.optimize),
1969 ]
1969 ]
1970 if opts['no_optimized']:
1970 if opts['no_optimized']:
1971 stages = stages[:-1]
1971 stages = stages[:-1]
1972 if opts['verify_optimized'] and opts['no_optimized']:
1972 if opts['verify_optimized'] and opts['no_optimized']:
1973 raise error.Abort(_('cannot use --verify-optimized with '
1973 raise error.Abort(_('cannot use --verify-optimized with '
1974 '--no-optimized'))
1974 '--no-optimized'))
1975 stagenames = set(n for n, f in stages)
1975 stagenames = set(n for n, f in stages)
1976
1976
1977 showalways = set()
1977 showalways = set()
1978 showchanged = set()
1978 showchanged = set()
1979 if ui.verbose and not opts['show_stage']:
1979 if ui.verbose and not opts['show_stage']:
1980 # show parsed tree by --verbose (deprecated)
1980 # show parsed tree by --verbose (deprecated)
1981 showalways.add('parsed')
1981 showalways.add('parsed')
1982 showchanged.update(['expanded', 'concatenated'])
1982 showchanged.update(['expanded', 'concatenated'])
1983 if opts['optimize']:
1983 if opts['optimize']:
1984 showalways.add('optimized')
1984 showalways.add('optimized')
1985 if opts['show_stage'] and opts['optimize']:
1985 if opts['show_stage'] and opts['optimize']:
1986 raise error.Abort(_('cannot use --optimize with --show-stage'))
1986 raise error.Abort(_('cannot use --optimize with --show-stage'))
1987 if opts['show_stage'] == ['all']:
1987 if opts['show_stage'] == ['all']:
1988 showalways.update(stagenames)
1988 showalways.update(stagenames)
1989 else:
1989 else:
1990 for n in opts['show_stage']:
1990 for n in opts['show_stage']:
1991 if n not in stagenames:
1991 if n not in stagenames:
1992 raise error.Abort(_('invalid stage name: %s') % n)
1992 raise error.Abort(_('invalid stage name: %s') % n)
1993 showalways.update(opts['show_stage'])
1993 showalways.update(opts['show_stage'])
1994
1994
1995 treebystage = {}
1995 treebystage = {}
1996 printedtree = None
1996 printedtree = None
1997 tree = revsetlang.parse(expr, lookup=repo.__contains__)
1997 tree = revsetlang.parse(expr, lookup=repo.__contains__)
1998 for n, f in stages:
1998 for n, f in stages:
1999 treebystage[n] = tree = f(tree)
1999 treebystage[n] = tree = f(tree)
2000 if n in showalways or (n in showchanged and tree != printedtree):
2000 if n in showalways or (n in showchanged and tree != printedtree):
2001 if opts['show_stage'] or n != 'parsed':
2001 if opts['show_stage'] or n != 'parsed':
2002 ui.write(("* %s:\n") % n)
2002 ui.write(("* %s:\n") % n)
2003 ui.write(revsetlang.prettyformat(tree), "\n")
2003 ui.write(revsetlang.prettyformat(tree), "\n")
2004 printedtree = tree
2004 printedtree = tree
2005
2005
2006 if opts['verify_optimized']:
2006 if opts['verify_optimized']:
2007 arevs = revset.makematcher(treebystage['analyzed'])(repo)
2007 arevs = revset.makematcher(treebystage['analyzed'])(repo)
2008 brevs = revset.makematcher(treebystage['optimized'])(repo)
2008 brevs = revset.makematcher(treebystage['optimized'])(repo)
2009 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2009 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2010 ui.write(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
2010 ui.write(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
2011 ui.write(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
2011 ui.write(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
2012 arevs = list(arevs)
2012 arevs = list(arevs)
2013 brevs = list(brevs)
2013 brevs = list(brevs)
2014 if arevs == brevs:
2014 if arevs == brevs:
2015 return 0
2015 return 0
2016 ui.write(('--- analyzed\n'), label='diff.file_a')
2016 ui.write(('--- analyzed\n'), label='diff.file_a')
2017 ui.write(('+++ optimized\n'), label='diff.file_b')
2017 ui.write(('+++ optimized\n'), label='diff.file_b')
2018 sm = difflib.SequenceMatcher(None, arevs, brevs)
2018 sm = difflib.SequenceMatcher(None, arevs, brevs)
2019 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
2019 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
2020 if tag in ('delete', 'replace'):
2020 if tag in ('delete', 'replace'):
2021 for c in arevs[alo:ahi]:
2021 for c in arevs[alo:ahi]:
2022 ui.write('-%s\n' % c, label='diff.deleted')
2022 ui.write('-%s\n' % c, label='diff.deleted')
2023 if tag in ('insert', 'replace'):
2023 if tag in ('insert', 'replace'):
2024 for c in brevs[blo:bhi]:
2024 for c in brevs[blo:bhi]:
2025 ui.write('+%s\n' % c, label='diff.inserted')
2025 ui.write('+%s\n' % c, label='diff.inserted')
2026 if tag == 'equal':
2026 if tag == 'equal':
2027 for c in arevs[alo:ahi]:
2027 for c in arevs[alo:ahi]:
2028 ui.write(' %s\n' % c)
2028 ui.write(' %s\n' % c)
2029 return 1
2029 return 1
2030
2030
2031 func = revset.makematcher(tree)
2031 func = revset.makematcher(tree)
2032 revs = func(repo)
2032 revs = func(repo)
2033 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2033 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2034 ui.write(("* set:\n"), smartset.prettyformat(revs), "\n")
2034 ui.write(("* set:\n"), smartset.prettyformat(revs), "\n")
2035 if not opts['show_revs']:
2035 if not opts['show_revs']:
2036 return
2036 return
2037 for c in revs:
2037 for c in revs:
2038 ui.write("%s\n" % c)
2038 ui.write("%s\n" % c)
2039
2039
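# Editor's sketch of a hypothetical invocation (trees elided, shapes only):
#
#   $ hg debugrevspec -p parsed -p optimized --no-show-revs 'heads(all())'
#   * parsed:
#   ...
#   * optimized:
#   ...
#
# Each requested stage prints its tree in turn, per the loop above.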
2040 @command('debugsetparents', [], _('REV1 [REV2]'))
2040 @command('debugsetparents', [], _('REV1 [REV2]'))
2041 def debugsetparents(ui, repo, rev1, rev2=None):
2041 def debugsetparents(ui, repo, rev1, rev2=None):
2042 """manually set the parents of the current working directory
2042 """manually set the parents of the current working directory
2043
2043
2044 This is useful for writing repository conversion tools, but should
2044 This is useful for writing repository conversion tools, but should
2045 be used with care. For example, neither the working directory nor the
2045 be used with care. For example, neither the working directory nor the
2046 dirstate is updated, so file status may be incorrect after running this
2046 dirstate is updated, so file status may be incorrect after running this
2047 command.
2047 command.
2048
2048
2049 Returns 0 on success.
2049 Returns 0 on success.
2050 """
2050 """
2051
2051
2052 r1 = scmutil.revsingle(repo, rev1).node()
2052 r1 = scmutil.revsingle(repo, rev1).node()
2053 r2 = scmutil.revsingle(repo, rev2, 'null').node()
2053 r2 = scmutil.revsingle(repo, rev2, 'null').node()
2054
2054
2055 with repo.wlock():
2055 with repo.wlock():
2056 repo.setparents(r1, r2)
2056 repo.setparents(r1, r2)
2057
2057
2058 @command('debugsub',
2058 @command('debugsub',
2059 [('r', 'rev', '',
2059 [('r', 'rev', '',
2060 _('revision to check'), _('REV'))],
2060 _('revision to check'), _('REV'))],
2061 _('[-r REV] [REV]'))
2061 _('[-r REV] [REV]'))
2062 def debugsub(ui, repo, rev=None):
2062 def debugsub(ui, repo, rev=None):
2063 ctx = scmutil.revsingle(repo, rev, None)
2063 ctx = scmutil.revsingle(repo, rev, None)
2064 for k, v in sorted(ctx.substate.items()):
2064 for k, v in sorted(ctx.substate.items()):
2065 ui.write(('path %s\n') % k)
2065 ui.write(('path %s\n') % k)
2066 ui.write((' source %s\n') % v[0])
2066 ui.write((' source %s\n') % v[0])
2067 ui.write((' revision %s\n') % v[1])
2067 ui.write((' revision %s\n') % v[1])
2068
2068
2069 @command('debugsuccessorssets',
2069 @command('debugsuccessorssets',
2070 [],
2070 [],
2071 _('[REV]'))
2071 _('[REV]'))
2072 def debugsuccessorssets(ui, repo, *revs):
2072 def debugsuccessorssets(ui, repo, *revs):
2073 """show set of successors for revision
2073 """show set of successors for revision
2074
2074
2075 A successors set of changeset A is a consistent group of revisions that
2075 A successors set of changeset A is a consistent group of revisions that
2076 succeed A. It contains non-obsolete changesets only.
2076 succeed A. It contains non-obsolete changesets only.
2077
2077
2078 In most cases a changeset A has a single successors set containing a single
2078 In most cases a changeset A has a single successors set containing a single
2079 successor (changeset A replaced by A').
2079 successor (changeset A replaced by A').
2080
2080
2081 A changeset that is made obsolete with no successors are called "pruned".
2081 A changeset that is made obsolete with no successors are called "pruned".
2082 Such changesets have no successors sets at all.
2082 Such changesets have no successors sets at all.
2083
2083
2084 A changeset that has been "split" will have a successors set containing
2084 A changeset that has been "split" will have a successors set containing
2085 more than one successor.
2085 more than one successor.
2086
2086
2087 A changeset that has been rewritten in multiple different ways is called
2087 A changeset that has been rewritten in multiple different ways is called
2088 "divergent". Such changesets have multiple successor sets (each of which
2088 "divergent". Such changesets have multiple successor sets (each of which
2089 may also be split, i.e. have multiple successors).
2089 may also be split, i.e. have multiple successors).
2090
2090
2091 Results are displayed as follows::
2091 Results are displayed as follows::
2092
2092
2093 <rev1>
2093 <rev1>
2094 <successors-1A>
2094 <successors-1A>
2095 <rev2>
2095 <rev2>
2096 <successors-2A>
2096 <successors-2A>
2097 <successors-2B1> <successors-2B2> <successors-2B3>
2097 <successors-2B1> <successors-2B2> <successors-2B3>
2098
2098
2099 Here rev2 has two possible (i.e. divergent) successors sets. The first
2099 Here rev2 has two possible (i.e. divergent) successors sets. The first
2100 holds one element, whereas the second holds three (i.e. the changeset has
2100 holds one element, whereas the second holds three (i.e. the changeset has
2101 been split).
2101 been split).
2102 """
2102 """
2103 # passed to successorssets caching computation from one call to another
2103 # passed to successorssets caching computation from one call to another
2104 cache = {}
2104 cache = {}
2105 ctx2str = str
2105 ctx2str = str
2106 node2str = short
2106 node2str = short
2107 if ui.debug():
2107 if ui.debug():
2108 def ctx2str(ctx):
2108 def ctx2str(ctx):
2109 return ctx.hex()
2109 return ctx.hex()
2110 node2str = hex
2110 node2str = hex
2111 for rev in scmutil.revrange(repo, revs):
2111 for rev in scmutil.revrange(repo, revs):
2112 ctx = repo[rev]
2112 ctx = repo[rev]
2113 ui.write('%s\n' % ctx2str(ctx))
2113 ui.write('%s\n' % ctx2str(ctx))
2114 for succsset in obsutil.successorssets(repo, ctx.node(), cache):
2114 for succsset in obsutil.successorssets(repo, ctx.node(), cache):
2115 if succsset:
2115 if succsset:
2116 ui.write(' ')
2116 ui.write(' ')
2117 ui.write(node2str(succsset[0]))
2117 ui.write(node2str(succsset[0]))
2118 for node in succsset[1:]:
2118 for node in succsset[1:]:
2119 ui.write(' ')
2119 ui.write(' ')
2120 ui.write(node2str(node))
2120 ui.write(node2str(node))
2121 ui.write('\n')
2121 ui.write('\n')
2122
2122
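# Editor's sketch: the same lookup done programmatically ('repo' and 'revs'
# are assumed to be at hand; this is not part of the original source).
#
#   from mercurial import node, obsutil
#   cache = {}  # shared across calls, as in the command above
#   for rev in revs:
#       ctx = repo[rev]
#       for succsset in obsutil.successorssets(repo, ctx.node(), cache):
#           print([node.hex(n) for n in succsset])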
2123 @command('debugtemplate',
2123 @command('debugtemplate',
2124 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2124 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2125 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2125 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2126 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2126 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2127 optionalrepo=True)
2127 optionalrepo=True)
2128 def debugtemplate(ui, repo, tmpl, **opts):
2128 def debugtemplate(ui, repo, tmpl, **opts):
2129 """parse and apply a template
2129 """parse and apply a template
2130
2130
2131 If -r/--rev is given, the template is processed as a log template and
2131 If -r/--rev is given, the template is processed as a log template and
2132 applied to the given changesets. Otherwise, it is processed as a generic
2132 applied to the given changesets. Otherwise, it is processed as a generic
2133 template.
2133 template.
2134
2134
2135 Use --verbose to print the parsed tree.
2135 Use --verbose to print the parsed tree.
2136 """
2136 """
2137 revs = None
2137 revs = None
2138 if opts[r'rev']:
2138 if opts[r'rev']:
2139 if repo is None:
2139 if repo is None:
2140 raise error.RepoError(_('there is no Mercurial repository here '
2140 raise error.RepoError(_('there is no Mercurial repository here '
2141 '(.hg not found)'))
2141 '(.hg not found)'))
2142 revs = scmutil.revrange(repo, opts[r'rev'])
2142 revs = scmutil.revrange(repo, opts[r'rev'])
2143
2143
2144 props = {}
2144 props = {}
2145 for d in opts[r'define']:
2145 for d in opts[r'define']:
2146 try:
2146 try:
2147 k, v = (e.strip() for e in d.split('=', 1))
2147 k, v = (e.strip() for e in d.split('=', 1))
2148 if not k or k == 'ui':
2148 if not k or k == 'ui':
2149 raise ValueError
2149 raise ValueError
2150 props[k] = v
2150 props[k] = v
2151 except ValueError:
2151 except ValueError:
2152 raise error.Abort(_('malformed keyword definition: %s') % d)
2152 raise error.Abort(_('malformed keyword definition: %s') % d)
2153
2153
2154 if ui.verbose:
2154 if ui.verbose:
2155 aliases = ui.configitems('templatealias')
2155 aliases = ui.configitems('templatealias')
2156 tree = templater.parse(tmpl)
2156 tree = templater.parse(tmpl)
2157 ui.note(templater.prettyformat(tree), '\n')
2157 ui.note(templater.prettyformat(tree), '\n')
2158 newtree = templater.expandaliases(tree, aliases)
2158 newtree = templater.expandaliases(tree, aliases)
2159 if newtree != tree:
2159 if newtree != tree:
2160 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2160 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2161
2161
2162 if revs is None:
2162 if revs is None:
2163 t = formatter.maketemplater(ui, tmpl)
2163 t = formatter.maketemplater(ui, tmpl)
2164 props['ui'] = ui
2164 props['ui'] = ui
2165 ui.write(t.render(props))
2165 ui.write(t.render(props))
2166 else:
2166 else:
2167 displayer = cmdutil.makelogtemplater(ui, repo, tmpl)
2167 displayer = cmdutil.makelogtemplater(ui, repo, tmpl)
2168 for r in revs:
2168 for r in revs:
2169 displayer.show(repo[r], **pycompat.strkwargs(props))
2169 displayer.show(repo[r], **pycompat.strkwargs(props))
2170 displayer.close()
2170 displayer.close()
2171
2171
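# Editor's sketch of a hypothetical invocation using a defined keyword:
#
#   $ hg debugtemplate -D greeting=hello '{greeting} world\n'
#   hello world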
2172 @command('debugupdatecaches', [])
2172 @command('debugupdatecaches', [])
2173 def debugupdatecaches(ui, repo, *pats, **opts):
2173 def debugupdatecaches(ui, repo, *pats, **opts):
2174 """warm all known caches in the repository"""
2174 """warm all known caches in the repository"""
2175 with repo.wlock():
2175 with repo.wlock():
2176 with repo.lock():
2176 with repo.lock():
2177 repo.updatecaches()
2177 repo.updatecaches()
2178
2178
2179 @command('debugupgraderepo', [
2179 @command('debugupgraderepo', [
2180 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2180 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2181 ('', 'run', False, _('performs an upgrade')),
2181 ('', 'run', False, _('performs an upgrade')),
2182 ])
2182 ])
2183 def debugupgraderepo(ui, repo, run=False, optimize=None):
2183 def debugupgraderepo(ui, repo, run=False, optimize=None):
2184 """upgrade a repository to use different features
2184 """upgrade a repository to use different features
2185
2185
2186 If no arguments are specified, the repository is evaluated for upgrade
2186 If no arguments are specified, the repository is evaluated for upgrade
2187 and a list of problems and potential optimizations is printed.
2187 and a list of problems and potential optimizations is printed.
2188
2188
2189 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2189 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2190 can be influenced via additional arguments. More details will be provided
2190 can be influenced via additional arguments. More details will be provided
2191 by the command output when run without ``--run``.
2191 by the command output when run without ``--run``.
2192
2192
2193 During the upgrade, the repository will be locked and no writes will be
2193 During the upgrade, the repository will be locked and no writes will be
2194 allowed.
2194 allowed.
2195
2195
2196 At the end of the upgrade, the repository may not be readable while new
2196 At the end of the upgrade, the repository may not be readable while new
2197 repository data is swapped in. This window will be as long as it takes to
2197 repository data is swapped in. This window will be as long as it takes to
2198 rename some directories inside the ``.hg`` directory. On most machines, this
2198 rename some directories inside the ``.hg`` directory. On most machines, this
2199 should complete almost instantaneously and the chances of a consumer being
2199 should complete almost instantaneously and the chances of a consumer being
2200 unable to access the repository should be low.
2200 unable to access the repository should be low.
2201 """
2201 """
2202 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2202 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2203
2203
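# Editor's note: the typical flow is to preview first, then run (a sketch;
# the actual report depends on the repository):
#
#   $ hg debugupgraderepo        # list problems and candidate optimizations
#   $ hg debugupgraderepo --run  # actually perform the upgrade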
2204 @command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
2204 @command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
2205 inferrepo=True)
2205 inferrepo=True)
2206 def debugwalk(ui, repo, *pats, **opts):
2206 def debugwalk(ui, repo, *pats, **opts):
2207 """show how files match on given patterns"""
2207 """show how files match on given patterns"""
2208 opts = pycompat.byteskwargs(opts)
2208 opts = pycompat.byteskwargs(opts)
2209 m = scmutil.match(repo[None], pats, opts)
2209 m = scmutil.match(repo[None], pats, opts)
2210 ui.write(('matcher: %r\n' % m))
2210 ui.write(('matcher: %r\n' % m))
2211 items = list(repo[None].walk(m))
2211 items = list(repo[None].walk(m))
2212 if not items:
2212 if not items:
2213 return
2213 return
2214 f = lambda fn: fn
2214 f = lambda fn: fn
2215 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2215 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2216 f = lambda fn: util.normpath(fn)
2216 f = lambda fn: util.normpath(fn)
2217 fmt = 'f %%-%ds %%-%ds %%s' % (
2217 fmt = 'f %%-%ds %%-%ds %%s' % (
2218 max([len(abs) for abs in items]),
2218 max([len(abs) for abs in items]),
2219 max([len(m.rel(abs)) for abs in items]))
2219 max([len(m.rel(abs)) for abs in items]))
2220 for abs in items:
2220 for abs in items:
2221 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2221 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2222 ui.write("%s\n" % line.rstrip())
2222 ui.write("%s\n" % line.rstrip())
2223
2223
2224 @command('debugwireargs',
2224 @command('debugwireargs',
2225 [('', 'three', '', 'three'),
2225 [('', 'three', '', 'three'),
2226 ('', 'four', '', 'four'),
2226 ('', 'four', '', 'four'),
2227 ('', 'five', '', 'five'),
2227 ('', 'five', '', 'five'),
2228 ] + cmdutil.remoteopts,
2228 ] + cmdutil.remoteopts,
2229 _('REPO [OPTIONS]... [ONE [TWO]]'),
2229 _('REPO [OPTIONS]... [ONE [TWO]]'),
2230 norepo=True)
2230 norepo=True)
2231 def debugwireargs(ui, repopath, *vals, **opts):
2231 def debugwireargs(ui, repopath, *vals, **opts):
2232 opts = pycompat.byteskwargs(opts)
2232 opts = pycompat.byteskwargs(opts)
2233 repo = hg.peer(ui, opts, repopath)
2233 repo = hg.peer(ui, opts, repopath)
2234 for opt in cmdutil.remoteopts:
2234 for opt in cmdutil.remoteopts:
2235 del opts[opt[1]]
2235 del opts[opt[1]]
2236 args = {}
2236 args = {}
2237 for k, v in opts.iteritems():
2237 for k, v in opts.iteritems():
2238 if v:
2238 if v:
2239 args[k] = v
2239 args[k] = v
2240 # run twice to check that we don't mess up the stream for the next command
2240 # run twice to check that we don't mess up the stream for the next command
2241 res1 = repo.debugwireargs(*vals, **args)
2241 res1 = repo.debugwireargs(*vals, **args)
2242 res2 = repo.debugwireargs(*vals, **args)
2242 res2 = repo.debugwireargs(*vals, **args)
2243 ui.write("%s\n" % res1)
2243 ui.write("%s\n" % res1)
2244 if res1 != res2:
2244 if res1 != res2:
2245 ui.warn("%s\n" % res2)
2245 ui.warn("%s\n" % res2)
@@ -1,1041 +1,1031 b''
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 build new tools to reconcile conflicting rewrite actions. To
18 build new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and new changeset identifiers, such as creation date or
20 besides old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "precursor" and possible
23 The old obsoleted changeset is called a "precursor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a precursor are called "successor markers of X" because they hold
25 a precursor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successors are call "precursor markers of Y" because they hold
27 a successors are call "precursor markers of Y" because they hold
28 information about the precursors of Y.
28 information about the precursors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47 (A, (B, C))
47 (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A into A' and
50 case. If two independent operations rewrite the same changeset A into A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depends on the version. See
66 The header is followed by the markers. Marker format depends on the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
70 from __future__ import absolute_import
70 from __future__ import absolute_import
71
71
72 import errno
72 import errno
73 import struct
73 import struct
74
74
75 from .i18n import _
75 from .i18n import _
76 from . import (
76 from . import (
77 error,
77 error,
78 node,
78 node,
79 obsutil,
79 obsutil,
80 phases,
80 phases,
81 policy,
81 policy,
82 util,
82 util,
83 )
83 )
84
84
85 parsers = policy.importmod(r'parsers')
85 parsers = policy.importmod(r'parsers')
86
86
87 _pack = struct.pack
87 _pack = struct.pack
88 _unpack = struct.unpack
88 _unpack = struct.unpack
89 _calcsize = struct.calcsize
89 _calcsize = struct.calcsize
90 propertycache = util.propertycache
90 propertycache = util.propertycache
91
91
92 # the obsolete feature is not mature enough to be enabled by default.
92 # the obsolete feature is not mature enough to be enabled by default.
93 # you have to rely on a third party extension to enable this.
93 # you have to rely on a third party extension to enable this.
94 _enabled = False
94 _enabled = False
95
95
96 # Options for obsolescence
96 # Options for obsolescence
97 createmarkersopt = 'createmarkers'
97 createmarkersopt = 'createmarkers'
98 allowunstableopt = 'allowunstable'
98 allowunstableopt = 'allowunstable'
99 exchangeopt = 'exchange'
99 exchangeopt = 'exchange'
100
100
101 def isenabled(repo, option):
101 def isenabled(repo, option):
102 """Returns True if the given repository has the given obsolete option
102 """Returns True if the given repository has the given obsolete option
103 enabled.
103 enabled.
104 """
104 """
105 result = set(repo.ui.configlist('experimental', 'evolution'))
105 result = set(repo.ui.configlist('experimental', 'evolution'))
106 if 'all' in result:
106 if 'all' in result:
107 return True
107 return True
108
108
109 # For migration purposes, temporarily return true if the config hasn't been
109 # For migration purposes, temporarily return true if the config hasn't been
110 # set but _enabled is true.
110 # set but _enabled is true.
111 if len(result) == 0 and _enabled:
111 if len(result) == 0 and _enabled:
112 return True
112 return True
113
113
114 # createmarkers must be enabled if other options are enabled
114 # createmarkers must be enabled if other options are enabled
115 if ((allowunstableopt in result or exchangeopt in result) and
115 if ((allowunstableopt in result or exchangeopt in result) and
116 not createmarkersopt in result):
116 not createmarkersopt in result):
117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
118 "if other obsolete options are enabled"))
118 "if other obsolete options are enabled"))
119
119
120 return option in result
120 return option in result
121
121
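# Editor's sketch: typical call sites guard marker creation on the option
# ('repo' assumed):
#
#   if isenabled(repo, createmarkersopt):
#       pass  # safe to create obsolescence markers here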
122 ### obsolescence marker flag
122 ### obsolescence marker flag
123
123
124 ## bumpedfix flag
124 ## bumpedfix flag
125 #
125 #
126 # When a changeset A' succeeds a changeset A which became public, we call A'
126 # When a changeset A' succeeds a changeset A which became public, we call A'
127 # "bumped" because it's a successor of a public changeset
127 # "bumped" because it's a successor of a public changeset
128 #
128 #
129 # o A' (bumped)
129 # o A' (bumped)
130 # |`:
130 # |`:
131 # | o A
131 # | o A
132 # |/
132 # |/
133 # o Z
133 # o Z
134 #
134 #
135 # The way to solve this situation is to create a new changeset Ad as a child
135 # The way to solve this situation is to create a new changeset Ad as a child
136 # of A. This changeset has the same content as A'. So the diff from A to A'
136 # of A. This changeset has the same content as A'. So the diff from A to A'
137 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
137 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
138 #
138 #
139 # o Ad
139 # o Ad
140 # |`:
140 # |`:
141 # | x A'
141 # | x A'
142 # |'|
142 # |'|
143 # o | A
143 # o | A
144 # |/
144 # |/
145 # o Z
145 # o Z
146 #
146 #
147 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
147 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
148 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
148 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
149 # This flag means that the successors express the changes between the public and
149 # This flag means that the successors express the changes between the public and
150 # bumped version and fix the situation, breaking the transitivity of
150 # bumped version and fix the situation, breaking the transitivity of
151 # "bumped" here.
151 # "bumped" here.
152 bumpedfix = 1
152 bumpedfix = 1
153 usingsha256 = 2
153 usingsha256 = 2
154
154
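# Editor's sketch: the marker flag field is a bit field, so consumers test
# it with bitwise AND.
#
#   flags = bumpedfix | usingsha256   # a marker carrying both flags
#   if flags & usingsha256:
#       nodesize = 32                 # node ids are sha256, not sha1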
155 ## Parsing and writing of version "0"
155 ## Parsing and writing of version "0"
156 #
156 #
157 # The header is followed by the markers. Each marker is made of:
157 # The header is followed by the markers. Each marker is made of:
158 #
158 #
159 # - 1 uint8 : number of new changesets "N", can be zero.
159 # - 1 uint8 : number of new changesets "N", can be zero.
160 #
160 #
161 # - 1 uint32: metadata size "M" in bytes.
161 # - 1 uint32: metadata size "M" in bytes.
162 #
162 #
163 # - 1 byte: a bit field. It is reserved for flags used in common
163 # - 1 byte: a bit field. It is reserved for flags used in common
164 # obsolete marker operations, to avoid repeated decoding of metadata
164 # obsolete marker operations, to avoid repeated decoding of metadata
165 # entries.
165 # entries.
166 #
166 #
167 # - 20 bytes: obsoleted changeset identifier.
167 # - 20 bytes: obsoleted changeset identifier.
168 #
168 #
169 # - N*20 bytes: new changesets identifiers.
169 # - N*20 bytes: new changesets identifiers.
170 #
170 #
171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 # string contains a key and a value, separated by a colon ':', without
172 # string contains a key and a value, separated by a colon ':', without
173 # additional encoding. Keys cannot contain '\0' or ':' and values
173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 # cannot contain '\0'.
174 # cannot contain '\0'.
175 _fm0version = 0
175 _fm0version = 0
176 _fm0fixed = '>BIB20s'
176 _fm0fixed = '>BIB20s'
177 _fm0node = '20s'
177 _fm0node = '20s'
178 _fm0fsize = _calcsize(_fm0fixed)
178 _fm0fsize = _calcsize(_fm0fixed)
179 _fm0fnodesize = _calcsize(_fm0node)
179 _fm0fnodesize = _calcsize(_fm0node)
180
180
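# Editor's sketch (not part of the original file): hand-packing one
# version-0 marker for a pruned changeset -- no successors, no metadata --
# following the layout documented above. The node id is hypothetical.
#
#   _example_prec = b'\x11' * 20
#   _example_raw = _pack(_fm0fixed, 0, 0, 0, _example_prec)
#   assert len(_example_raw) == _fm0fsize   # 26 bytes of fixed fields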
181 def _fm0readmarkers(data, off):
181 def _fm0readmarkers(data, off):
182 # Loop on markers
182 # Loop on markers
183 l = len(data)
183 l = len(data)
184 while off + _fm0fsize <= l:
184 while off + _fm0fsize <= l:
185 # read fixed part
185 # read fixed part
186 cur = data[off:off + _fm0fsize]
186 cur = data[off:off + _fm0fsize]
187 off += _fm0fsize
187 off += _fm0fsize
188 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
188 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
189 # read replacement
189 # read replacement
190 sucs = ()
190 sucs = ()
191 if numsuc:
191 if numsuc:
192 s = (_fm0fnodesize * numsuc)
192 s = (_fm0fnodesize * numsuc)
193 cur = data[off:off + s]
193 cur = data[off:off + s]
194 sucs = _unpack(_fm0node * numsuc, cur)
194 sucs = _unpack(_fm0node * numsuc, cur)
195 off += s
195 off += s
196 # read metadata
196 # read metadata
197 # (metadata will be decoded on demand)
197 # (metadata will be decoded on demand)
198 metadata = data[off:off + mdsize]
198 metadata = data[off:off + mdsize]
199 if len(metadata) != mdsize:
199 if len(metadata) != mdsize:
200 raise error.Abort(_('parsing obsolete marker: metadata is too '
200 raise error.Abort(_('parsing obsolete marker: metadata is too '
201 'short, %d bytes expected, got %d')
201 'short, %d bytes expected, got %d')
202 % (mdsize, len(metadata)))
202 % (mdsize, len(metadata)))
203 off += mdsize
203 off += mdsize
204 metadata = _fm0decodemeta(metadata)
204 metadata = _fm0decodemeta(metadata)
205 try:
205 try:
206 when, offset = metadata.pop('date', '0 0').split(' ')
206 when, offset = metadata.pop('date', '0 0').split(' ')
207 date = float(when), int(offset)
207 date = float(when), int(offset)
208 except ValueError:
208 except ValueError:
209 date = (0., 0)
209 date = (0., 0)
210 parents = None
210 parents = None
211 if 'p2' in metadata:
211 if 'p2' in metadata:
212 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
212 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
213 elif 'p1' in metadata:
213 elif 'p1' in metadata:
214 parents = (metadata.pop('p1', None),)
214 parents = (metadata.pop('p1', None),)
215 elif 'p0' in metadata:
215 elif 'p0' in metadata:
216 parents = ()
216 parents = ()
217 if parents is not None:
217 if parents is not None:
218 try:
218 try:
219 parents = tuple(node.bin(p) for p in parents)
219 parents = tuple(node.bin(p) for p in parents)
220 # if parent content is not a nodeid, drop the data
220 # if parent content is not a nodeid, drop the data
221 for p in parents:
221 for p in parents:
222 if len(p) != 20:
222 if len(p) != 20:
223 parents = None
223 parents = None
224 break
224 break
225 except TypeError:
225 except TypeError:
226 # if content cannot be translated to nodeid drop the data.
226 # if content cannot be translated to nodeid drop the data.
227 parents = None
227 parents = None
228
228
229 metadata = tuple(sorted(metadata.iteritems()))
229 metadata = tuple(sorted(metadata.iteritems()))
230
230
231 yield (pre, sucs, flags, metadata, date, parents)
231 yield (pre, sucs, flags, metadata, date, parents)
232
232
233 def _fm0encodeonemarker(marker):
233 def _fm0encodeonemarker(marker):
234 pre, sucs, flags, metadata, date, parents = marker
234 pre, sucs, flags, metadata, date, parents = marker
235 if flags & usingsha256:
235 if flags & usingsha256:
236 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
236 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
237 metadata = dict(metadata)
237 metadata = dict(metadata)
238 time, tz = date
238 time, tz = date
239 metadata['date'] = '%r %i' % (time, tz)
239 metadata['date'] = '%r %i' % (time, tz)
240 if parents is not None:
240 if parents is not None:
241 if not parents:
241 if not parents:
242 # mark that we explicitly recorded no parents
242 # mark that we explicitly recorded no parents
243 metadata['p0'] = ''
243 metadata['p0'] = ''
244 for i, p in enumerate(parents, 1):
244 for i, p in enumerate(parents, 1):
245 metadata['p%i' % i] = node.hex(p)
245 metadata['p%i' % i] = node.hex(p)
246 metadata = _fm0encodemeta(metadata)
246 metadata = _fm0encodemeta(metadata)
247 numsuc = len(sucs)
247 numsuc = len(sucs)
248 format = _fm0fixed + (_fm0node * numsuc)
248 format = _fm0fixed + (_fm0node * numsuc)
249 data = [numsuc, len(metadata), flags, pre]
249 data = [numsuc, len(metadata), flags, pre]
250 data.extend(sucs)
250 data.extend(sucs)
251 return _pack(format, *data) + metadata
251 return _pack(format, *data) + metadata
252
252
253 def _fm0encodemeta(meta):
253 def _fm0encodemeta(meta):
254 """Return encoded metadata string to string mapping.
254 """Return encoded metadata string to string mapping.
255
255
256 Assume no ':' in key and no '\0' in both key and value."""
256 Assume no ':' in key and no '\0' in both key and value."""
257 for key, value in meta.iteritems():
257 for key, value in meta.iteritems():
258 if ':' in key or '\0' in key:
258 if ':' in key or '\0' in key:
259 raise ValueError("':' and '\0' are forbidden in metadata key")
259 raise ValueError("':' and '\0' are forbidden in metadata key")
260 if '\0' in value:
260 if '\0' in value:
261 raise ValueError("'\0' is forbidden in metadata value")
261 raise ValueError("'\0' is forbidden in metadata value")
262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
263
263
264 def _fm0decodemeta(data):
264 def _fm0decodemeta(data):
265 """Return string to string dictionary from encoded version."""
265 """Return string to string dictionary from encoded version."""
266 d = {}
266 d = {}
267 for l in data.split('\0'):
267 for l in data.split('\0'):
268 if l:
268 if l:
269 key, value = l.split(':')
269 key, value = l.split(':')
270 d[key] = value
270 d[key] = value
271 return d
271 return d
272
272
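# Editor's sketch: the two helpers above are inverses for well-formed input
# (no ':' or '\0' in keys, no '\0' or ':' in values; data is hypothetical).
#
#   _example_meta = {'user': 'alice', 'operation': 'amend'}
#   assert _fm0decodemeta(_fm0encodemeta(_example_meta)) == _example_meta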
273 ## Parsing and writing of version "1"
273 ## Parsing and writing of version "1"
274 #
274 #
275 # The header is followed by the markers. Each marker is made of:
275 # The header is followed by the markers. Each marker is made of:
276 #
276 #
277 # - uint32: total size of the marker (including this field)
277 # - uint32: total size of the marker (including this field)
278 #
278 #
279 # - float64: date in seconds since epoch
279 # - float64: date in seconds since epoch
280 #
280 #
281 # - int16: timezone offset in minutes
281 # - int16: timezone offset in minutes
282 #
282 #
283 # - uint16: a bit field. It is reserved for flags used in common
283 # - uint16: a bit field. It is reserved for flags used in common
284 # obsolete marker operations, to avoid repeated decoding of metadata
284 # obsolete marker operations, to avoid repeated decoding of metadata
285 # entries.
285 # entries.
286 #
286 #
287 # - uint8: number of successors "N", can be zero.
287 # - uint8: number of successors "N", can be zero.
288 #
288 #
289 # - uint8: number of parents "P", can be zero.
289 # - uint8: number of parents "P", can be zero.
290 #
290 #
291 # 0: parents data stored but no parent,
291 # 0: parents data stored but no parent,
292 # 1: one parent stored,
292 # 1: one parent stored,
293 # 2: two parents stored,
293 # 2: two parents stored,
294 # 3: no parent data stored
294 # 3: no parent data stored
295 #
295 #
296 # - uint8: number of metadata entries M
296 # - uint8: number of metadata entries M
297 #
297 #
298 # - 20 or 32 bytes: precursor changeset identifier.
298 # - 20 or 32 bytes: precursor changeset identifier.
299 #
299 #
300 # - N*(20 or 32) bytes: successors changesets identifiers.
300 # - N*(20 or 32) bytes: successors changesets identifiers.
301 #
301 #
302 # - P*(20 or 32) bytes: parents of the precursors changesets.
302 # - P*(20 or 32) bytes: parents of the precursors changesets.
303 #
303 #
304 # - M*(uint8, uint8): size of all metadata entries (key and value)
304 # - M*(uint8, uint8): size of all metadata entries (key and value)
305 #
305 #
306 # - remaining bytes: the metadata, each (key, value) pair after the other.
306 # - remaining bytes: the metadata, each (key, value) pair after the other.
307 _fm1version = 1
307 _fm1version = 1
308 _fm1fixed = '>IdhHBBB20s'
308 _fm1fixed = '>IdhHBBB20s'
309 _fm1nodesha1 = '20s'
309 _fm1nodesha1 = '20s'
310 _fm1nodesha256 = '32s'
310 _fm1nodesha256 = '32s'
311 _fm1nodesha1size = _calcsize(_fm1nodesha1)
311 _fm1nodesha1size = _calcsize(_fm1nodesha1)
312 _fm1nodesha256size = _calcsize(_fm1nodesha256)
312 _fm1nodesha256size = _calcsize(_fm1nodesha256)
313 _fm1fsize = _calcsize(_fm1fixed)
313 _fm1fsize = _calcsize(_fm1fixed)
314 _fm1parentnone = 3
314 _fm1parentnone = 3
315 _fm1parentshift = 14
315 _fm1parentshift = 14
316 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
316 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
317 _fm1metapair = 'BB'
317 _fm1metapair = 'BB'
318 _fm1metapairsize = _calcsize('BB')
318 _fm1metapairsize = _calcsize('BB')
319
319
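# Editor's sketch: packing and re-reading the fixed part of one version-1
# marker (no successors, no parent data, no metadata; hypothetical node id).
#
#   _rec = _pack(_fm1fixed, _fm1fsize, 0.0, 0, 0, 0, _fm1parentnone, 0,
#                b'\x22' * 20)
#   t, secs, tz, flags, numsuc, numpar, nummeta, prec = _unpack(_fm1fixed,
#                                                               _rec)
#   assert numpar == _fm1parentnone   # "no parent data stored"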
def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # date[1] is the timezone offset in seconds; the marker stores it in
    # minutes, so we divide by 60
    tz = date[1] // 60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

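# For illustration only (not part of the original module): a made-up
# marker round-tripped through the pure-Python fm1 encoder and decoder
# defined above. The node ids are fabricated 20-byte strings.
#
#   prec = '\x11' * 20
#   succ = '\x22' * 20
#   mark = (prec, (succ,), 0, (('user', 'alice'),), (0.0, 0), None)
#   raw = _fm1encodeonemarker(mark)
#   assert list(_fm1purereadmarkers(raw, 0)) == [mark]
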
def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    off = 1
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodeheader(version=_fm0version):
    return _pack('>B', version)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)

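# For illustration only (not part of the original module): serializing a
# list of markers to a fresh stream, header included, then reading it
# back. `markers` is assumed to be a list of valid marker tuples with
# timezone offsets in whole minutes (the fm1 encoding is lossy otherwise).
#
#   raw = ''.join(encodemarkers(markers, addheader=True,
#                               version=_fm1version))
#   version, decoded = _readmarkers(raw)
#   assert version == _fm1version and list(decoded) == markers
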
@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addprecursors(precursors, markers):
    for mark in markers:
        for suc in mark[1]:
            precursors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension to
    implement a more subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related sets
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete markers are always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers, you want to use
        the `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no-op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering out duplicates.
        Returns the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use one of these nodes as successor
        - prune markers of direct children of these nodes
        - recursive application of the two rules on precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
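
    # For illustration only (not part of the original class): with a
    # rewrite marker (Y, (X,)) and a prune marker (X, ()) in the store,
    # relevantmarkers([X]) returns both markers, because the first rule
    # selects the rewrite marker and the prune rule selects the prune
    # marker. The node ids below are fabricated:
    #
    #   X, Y = '\x0f' * 20, '\x1f' * 20
    #   rewrite = (Y, (X,), 0, (), (0.0, 0), None)
    #   prune = (X, (), 0, (), (0.0, 0), None)
    #   # store.relevantmarkers([X]) == set([rewrite, prune])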

def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version', None)
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None

# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys

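# For illustration only (not part of the original module): each value in
# the returned dict is an independently decodable fm0 stream, so a
# hypothetical receiver can process every chunk on its own:
#
#   for key, value in sorted(_pushkeyescape(repo.obsstore).items()):
#       data = util.b85decode(value)           # one 'dump%i' chunk
#       version, markers = _readmarkers(data)  # each has a version header
#       # ... feed `markers` to obsstore.add() inside a transaction
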
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()

-def getmarkers(repo, nodes=None, exclusive=False):
-    """returns markers known in a repository
-
-    If <nodes> is specified, only markers "relevant" to those nodes are
-    returned"""
-    if nodes is None:
-        rawmarkers = repo.obsstore
-    elif exclusive:
-        rawmarkers = obsutil.exclusivemarkers(repo, nodes)
-    else:
-        rawmarkers = repo.obsstore.relevantmarkers(nodes)
-
-    for markerdata in rawmarkers:
-        yield obsutil.marker(repo, markerdata)
-
# keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)

def marker(repo, data):
    movemsg = 'obsolete.marker moved to obsutil.marker'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.marker(repo, data)

+def getmarkers(repo, nodes=None, exclusive=False):
+    movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
+    repo.ui.deprecwarn(movemsg, '4.3')
+    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
+
def exclusivemarkers(repo, nodes):
    movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.exclusivemarkers(repo, nodes)

def foreground(repo, nodes):
    movemsg = 'obsolete.foreground moved to obsutil.foreground'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.foreground(repo, nodes)

def successorssets(repo, initialnode, cache=None):
    movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

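# For illustration only (not part of the original module): registering a
# hypothetical volatile set through the cachefor() decorator above and
# querying it through getrevs(). The set name 'example' is made up.
#
#   @cachefor('example')
#   def _computeexampleset(repo):
#       """obsolete revisions with more than one successor in one marker"""
#       getnode = repo.changelog.node
#       split = set()
#       for r in getrevs(repo, 'obsolete'):
#           for mark in repo.obsstore.successors.get(getnode(r), ()):
#               if len(mark[1]) > 1:
#                   split.add(r)
#       return split
#
#   # later: getrevs(repo, 'example') computes once, then hits the cache
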
# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

def _mutablerevs(repo):
    """the set of mutable revisions in the repository"""
    return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    isobs = repo.obsstore.successors.__contains__
    obs = set(r for r in notpublic if isobs(getnode(r)))
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, 'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works since we traverse following growing rev order
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return unstable

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete changesets with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete changesets without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of precursors may be worth it if split is very
        # common
        for pnode in obsutil.allprecursors(repo.obsstore, [node],
                                           ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public precursor
                bumped.add(rev)
                break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[, {metadata}])
    tuples. `old` and `new` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation',
                                      False)
    if useoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
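
# For illustration only (not part of the original module): a sketch of
# how an extension might record that one changeset rewrites another via
# the documented `createmarkers` entry point. `repo` is assumed to be a
# locked localrepo, and `old`/`new` changectx objects; the 'note' key
# and operation name are made up.
#
#   def markrewritten(repo, old, new):
#       # one (precursor, successors) relation, with per-marker metadata
#       createmarkers(repo, [(old, (new,), {'note': 'example'})],
#                     operation='example-rewrite')
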
@@ -1,476 +1,491 @@
# obsutil.py - utility functions for obsolescence
#
# Copyright 2017 Boris Feld <boris.feld@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later versions
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changeset node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

+def getmarkers(repo, nodes=None, exclusive=False):
+    """returns markers known in a repository
+
+    If <nodes> is specified, only markers "relevant" to those nodes are
+    returned"""
+    if nodes is None:
+        rawmarkers = repo.obsstore
+    elif exclusive:
+        rawmarkers = exclusivemarkers(repo, nodes)
+    else:
+        rawmarkers = repo.obsstore.relevantmarkers(nodes)
+
+    for markerdata in rawmarkers:
+        yield marker(repo, markerdata)
+
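# For illustration only (not part of the original module): iterating the
# markers relevant to the working directory parent through the wrapper
# objects yielded above; `repo` is assumed to be a loaded localrepo.
#
#   wdp = repo['.'].node()
#   for m in getmarkers(repo, nodes=[wdp]):
#       repo.ui.write('%s rewritten, %d successor(s)\n'
#                     % (m.precnode().encode('hex'), len(m.succnodes())))
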
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respects the repoview filtering; filtered revisions will be
    considered missing.
    """

    precursors = repo.obsstore.precursors
    stack = [nodeid]
    seen = set(stack)

    while stack:
        current = stack.pop()
        currentpreccs = precursors.get(current, ())

        for prec in currentpreccs:
            precnodeid = prec[0]

            # Basic cycle protection
            if precnodeid in seen:
                continue
            seen.add(precnodeid)

            if precnodeid in repo:
                yield precnodeid
            else:
                stack.append(precnodeid)

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with the specified flags
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flags
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

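# For illustration only (not part of the original module): allsuccessors
# walking a two-step rewrite chain A -> B -> C, built from fabricated
# marker tuples and a minimal stand-in for the obsstore mappings (the
# function only reads the `successors` attribute):
#
#   class _fakestore(object):
#       pass
#   A, B, C = '\x0a' * 20, '\x0b' * 20, '\x0c' * 20
#   m1 = (A, (B,), 0, (), (0.0, 0), None)
#   m2 = (B, (C,), 0, (), (0.0, 0), None)
#   store = _fakestore()
#   store.successors = {A: set([m1]), B: set([m2])}
#   assert set(allsuccessors(store, [A])) == set([A, B, C])
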
def _filterprunes(markers):
    """return a set with no prune markers"""
    return set(m for m in markers if m[1])

def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach
    a locally-known precursor outside of <nodes>. Elements of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

    # (A0 rewritten as A1)
    #
    # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

    # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
    #
    # <-1- A0 <-2- AX <-3- A1 # Markers "2,3" are exclusive to A1

    or

    # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
    #
    #          <-2- A1 # Marker "2" is exclusive to A0,A1
    #         /
    # <-1- A0
    #         \
    #          <-3- A2 # Marker "3" is exclusive to A0,A2
    #
    # in addition:
    #
    #  Markers "2,3" are exclusive to A1,A2
    #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned child is
    locally-known, since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changesets (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain)
    alongside the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful items
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.precursors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return value of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for heads in the obshistory
    #
    # XXX we are ignoring all issues with cycles for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prunes)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # marker already selected
                continue

            # If the marker is about the current node, select it
            #
            # (this delays the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally-known node?
            known = prec in nm
            # if locally-known and not in the <nodes> set, the traversal
            # stops here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to
            # this node. If we end up traversing these unselected markers
            # later, the node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers

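# A minimal usage sketch for exclusivemarkers() (hypothetical caller code,
# in the spirit of the strip example from its docstring; `revs` is assumed
# to be the revisions about to be stripped):
#
#   unfi = repo.unfiltered()
#   tostrip = [unfi.changelog.node(r) for r in revs]
#   for marker in exclusivemarkers(unfi, tostrip):
#       ...  # drop the marker alongside the stripped changesets
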
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using a parent ->
    children or precursor -> successor relation. It is very similar to
    "descendant" but augmented with obsolescence information.

    Beware that obsolescence cycles may lead to problems in complex
    situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)

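# Illustrative example for foreground() (hypothetical history): if A0 was
# rewritten as A1, then foreground(repo, [A0]) contains A0, A1 and all of
# their descendants, because the precursor -> successor edge A0 -> A1 is
# followed in addition to the parent -> children edges.
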
def successorssets(repo, initialnode, cache=None):
    """Return the set of all latest successors of the initial node

    The successors set of a changeset A is the group of revisions that
    succeed A. It succeeds A as a consistent whole, each revision being only
    a partial replacement. The successors set contains non-obsolete
    changesets only.

    This function returns the full list of successor sets, which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a
    valid successors set. Note that (A,) may be a valid successors set for
    changeset A (see below).

    In most cases, a changeset A will have a single element (e.g. the
    changeset A is replaced by A') in its successors set. However, it is also
    common for a changeset A to have no elements in its successors set (e.g.
    the changeset has been pruned). Therefore, the returned list of
    successors sets will be [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e.
    [(A',), (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the
    successors set will contain itself only, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned
    (obsoleted without any successors).

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the
    same time. The cache dictionary is updated in place. The caller is
    responsible for its life span. Code that makes multiple calls to
    `successorssets` *must* use this cache mechanism or suffer terrible
    performance.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # (every element added to "toproceed" must be added here too)
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in successors:
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we cannot use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack, used to check if a
    # node is already stacked. This check is used to detect cycles and
    # prevent infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
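    #
    # For instance, with hypothetical markers A -> B and B -> C (and C known
    # locally), the stack evolves as [A], [A, B], [A, B, C]; C resolves
    # first (case 2), which lets B resolve (case 4), which in turn lets A
    # resolve.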
    while toproceed:
        # Every iteration tries to compute the successors sets of the
        # topmost node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors set. Add it to the cache.
        # 3) We do not know successors sets of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node; multiple successors mean a
            #   split; a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors
                            # sets of one of those successors, we add it to
                            # the `toproceed` stack and stop all work for
                            # this iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from
                # other markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
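                #
                # Worked example for that post-processing (hypothetical
                # nodes): with succssets == [['A'], ['A', 'B'], ['A']], the
                # descending-length sort visits ['A', 'B'] first; each later
                # ['A'] is a subset of that already-seen set and is dropped,
                # leaving final == [['A', 'B']].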
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # Cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a
                                    # successors set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse()  # put small successors sets first
                cache[current] = final
    return cache[initialnode]