nodemap: track the maximum revision tracked in the nodemap...
marmoute
r44807:e41a164d default
@@ -1,4364 +1,4365 b''
# debugcommands.py - command processing for debug* commands
#
# Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import codecs
import collections
import difflib
import errno
import operator
import os
import platform
import random
import re
import socket
import ssl
import stat
import string
import subprocess
import sys
import time

from .i18n import _
from .node import (
    bin,
    hex,
    nullhex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    getattr,
    open,
)
from . import (
    bundle2,
    changegroup,
    cmdutil,
    color,
    context,
    copies,
    dagparser,
    encoding,
    error,
    exchange,
    extensions,
    filemerge,
    filesetlang,
    formatter,
    hg,
    httppeer,
    localrepo,
    lock as lockmod,
    logcmdutil,
    merge as mergemod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pvec,
    pycompat,
    registrar,
    repair,
    revlog,
    revset,
    revsetlang,
    scmutil,
    setdiscovery,
    simplemerge,
    sshpeer,
    sslutil,
    streamclone,
    tags as tagsmod,
    templater,
    treediscovery,
    upgrade,
    url as urlmod,
    util,
    vfs as vfsmod,
    wireprotoframing,
    wireprotoserver,
    wireprotov2peer,
)
from .utils import (
    cborutil,
    compression,
    dateutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    deltas as deltautil,
    nodemap,
)

release = lockmod.release

command = registrar.command()


@command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        index, rev1, rev2 = args
        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        if not repo:
            raise error.Abort(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_(b'either two or three arguments required'))
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))


@command(b'debugapplystreamclonebundle', [], b'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.apply(repo)


@command(
    b'debugbuilddag',
    [
        (
            b'm',
            b'mergeable-file',
            None,
            _(b'add single file mergeable changes'),
        ),
        (
            b'o',
            b'overwritten-file',
            None,
            _(b'add single file all revs overwrite'),
        ),
        (b'n', b'new-file', None, _(b'add new file at each rev')),
    ],
    _(b'[OPTION]... [TEXT]'),
)
def debugbuilddag(
    ui,
    repo,
    text=None,
    mergeable_file=False,
    overwritten_file=False,
    new_file=False,
):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string-valued elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_(b"reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_(b'repository is not empty'))

    # determine number of revs in DAG
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == b'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [
            b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
        ]
        initialmergedlines.append(b"")

    tags = []
    progress = ui.makeprogress(
        _(b'building'), unit=_(b'revisions'), total=total
    )
    with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
        at = -1
        atbranch = b'default'
        nodeids = []
        id = 0
        progress.update(id)
        for type, data in dagparser.parsedag(text):
            if type == b'n':
                ui.note((b'node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = b"mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [
                            x[fn].data() for x in (pa, p1, p2)
                        ]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append(b"")
                    elif at > 0:
                        ml = p1[fn].data().split(b"\n")
                    else:
                        ml = initialmergedlines
                    ml[id * linesperrev] += b" r%i" % id
                    mergedtext = b"\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = b"of"
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id

                if new_file:
                    fn = b"nf%i" % id
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith(b"nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(
                            repo, cx, path, filecontent[path]
                        )
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(
                    repo,
                    pars,
                    b"r%i" % id,
                    files,
                    fctxfn,
                    date=(id, 0),
                    user=b"debugbuilddag",
                    extra={b'branch': atbranch},
                )
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == b'l':
                id, name = data
                ui.note((b'tag %s\n' % name))
                tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == b'a':
                ui.note((b'branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write(b"localtags", b"".join(tags))


def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    indent_string = b' ' * indent
    if all:
        ui.writenoi18n(
            b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
            % indent_string
        )

        def showchunks(named):
            ui.write(b"\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write(
                    b"%s%s %s %s %s %s %d\n"
                    % (
                        indent_string,
                        hex(node),
                        hex(p1),
                        hex(p2),
                        hex(cs),
                        hex(deltabase),
                        len(delta),
                    )
                )

        gen.changelogheader()
        showchunks(b"changelog")
        gen.manifestheader()
        showchunks(b"manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata[b'filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_(b'use debugbundle2 for this file'))
        gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write(b"%s%s\n" % (indent_string, hex(node)))


def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = b' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        msg = b"%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = b"%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        fm = ui.formatter(b'debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()


def _debugphaseheads(ui, data, indent=0):
380 """display version and markers contained in 'data'"""
380 """display version and markers contained in 'data'"""
    indent_string = b' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write(indent_string)
            ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))


def _quasirepr(thing):
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        return b'{%s}' % (
            b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
        )
    return pycompat.bytestr(repr(thing))


def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_(b'not a bundle2 file'))
    ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
    parttypes = opts.get('part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = b'%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        if part.type == b'changegroup':
            version = part.params.get(b'version', b'01')
            cg = changegroup.getunbundler(version, part, b'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == b'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == b'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)


@command(
    b'debugbundle',
    [
        (b'a', b'all', None, _(b'show all details')),
        (b'', b'part-type', [], _(b'show only the named part type')),
        (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
    ],
    _(b'FILE'),
    norepo=True,
)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            spec = exchange.getbundlespec(ui, f)
            ui.write(b'%s\n' % spec)
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)


@command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    caps = peer.capabilities()
    ui.writenoi18n(b'Main capabilities:\n')
    for c in sorted(caps):
        ui.write(b' %s\n' % c)
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.writenoi18n(b'Bundle2 capabilities:\n')
        for key, values in sorted(pycompat.iteritems(b2caps)):
            ui.write(b' %s\n' % key)
            for v in values:
                ui.write(b' %s\n' % v)


@command(b'debugcheckstate', [], b'')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in b"nr" and f not in m1:
            ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in b"a" and f in m1:
            ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in b"m" and f not in m1 and f not in m2:
            ui.warn(
                _(b"%s in state %s, but not in either manifest\n") % (f, state)
            )
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in b"nrm":
            ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)


@command(
    b'debugcolor',
    [(b'', b'style', None, _(b'show all configured styles'))],
    b'hg debugcolor',
)
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
    if opts.get('style'):
        return _debugdisplaystyle(ui)
    else:
        return _debugdisplaycolor(ui)


def _debugdisplaycolor(ui):
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for k, v in ui.configitems(b'color'):
            if k.startswith(b'color.'):
                ui._styles[k] = k[6:]
            elif k.startswith(b'terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_(b'available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(b'%s\n' % colorname, label=label)


def _debugdisplaystyle(ui):
    ui.write(_(b'available style:\n'))
    if not ui._styles:
        return
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write(b'%s' % label, label=label)
        if effects:
            # 50
            ui.write(b': ')
            ui.write(b' ' * (max(0, width - len(label))))
            ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
        ui.write(b'\n')


@command(b'debugcreatestreamclonebundle', [], b'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(
            _(
                b'(warning: stream clone bundle will contain secret '
                b'revisions)\n'
            )
        )

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))


@command(
    b'debugdag',
    [
        (b't', b'tags', None, _(b'use tags as labels')),
        (b'b', b'branches', None, _(b'annotate with branch names')),
        (b'', b'dots', None, _(b'use dots for runs')),
        (b's', b'spaces', None, _(b'separate elements by spaces')),
    ],
    _(b'[OPTION]... [FILE [REV]...]'),
    optionalrepo=True,
)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
        revs = set((int(r) for r in revs))

        def events():
            for r in rlog:
                yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
                if r in revs:
                    yield b'l', (r, b"r%i" % r)

    elif repo:
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)

        def events():
            b = b"default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5][b'branch']
                    if newb != b:
                        yield b'a', newb
                        b = newb
                yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield b'l', (r, l)

    else:
        raise error.Abort(_(b'need repo for changelog dag'))

    for line in dagparser.dagtextlines(
        events(),
        addspaces=spaces,
        wraplabels=True,
        wrapannotations=True,
        wrapnonlinear=dots,
        usedots=dots,
        maxlinewidth=70,
    ):
        ui.write(line)
    ui.write(b"\n")


@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    try:
        ui.write(r.rawdata(r.lookup(rev)))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)


@command(
    b'debugdate',
    [(b'e', b'extended', None, _(b'try extended date formats'))],
    _(b'[-e] DATE [RANGE]'),
    norepo=True,
    optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        d = dateutil.parsedate(date, dateutil.extendeddateformats)
    else:
        d = dateutil.parsedate(date)
    ui.writenoi18n(b"internal: %d %d\n" % d)
    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
    if range:
        m = dateutil.matchdate(range)
        ui.writenoi18n(b"match: %s\n" % m(d[0]))


@command(
    b'debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use sparse reads, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
                   (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    Sparse reads can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            if e[3] == e[5]:
                deltatype = b'p1'
            elif e[3] == e[6]:
                deltatype = b'p2'
            elif e[3] == rev - 1:
                deltatype = b'prev'
            elif e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'other'
        else:
            if e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter(b'debugdeltachain', opts)

    fm.plain(
        b' rev chain# chainlen prev delta '
        b'size rawsize chainsize ratio lindist extradist '
        b'extraratio'
    )
    if withsparseread:
        fm.plain(b' readsize largestblk rddensity srchunks')
    fm.plain(b'\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write(
            b'rev chainid chainlen prevrev deltatype compsize '
            b'uncompsize chainsize chainratio lindist extradist '
            b'extraratio',
            b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
            rev,
            chainid,
            len(chain),
            prevrev,
            deltatype,
            comp,
            uncomp,
            chainsize,
            chainratio,
            lineardist,
            extradist,
            extraratio,
            rev=rev,
            chainid=chainid,
            chainlen=len(chain),
            prevrev=prevrev,
            deltatype=deltatype,
            compsize=comp,
            uncompsize=uncomp,
            chainsize=chainsize,
            chainratio=chainratio,
            lindist=lineardist,
            extradist=extradist,
            extraratio=extraratio,
        )
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write(
                b'readsize largestblock readdensity srchunks',
                b' %10d %10d %9.5f %8d',
                readsize,
                largestblock,
                readdensity,
                srchunks,
                readsize=readsize,
                largestblock=largestblock,
                readdensity=readdensity,
                srchunks=srchunks,
            )

        fm.plain(b'\n')

    fm.end()


@command(
    b'debugdirstate|debugstate',
    [
        (
            b'',
            b'nodates',
            None,
            _(b'do not display the saved mtime (DEPRECATED)'),
        ),
        (b'', b'dates', True, _(b'display the saved mtime')),
        (b'', b'datesort', None, _(b'sort by saved mtime')),
    ],
    _(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = not opts['dates']
    if opts.get('nodates') is not None:
        nodates = True
    datesort = opts.get('datesort')

    if datesort:
        keyfunc = lambda x: (x[1][3], x[0])  # sort by mtime, then by filename
    else:
        keyfunc = None  # sort by filename
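    # each dirstate entry is assumed to be a (state, mode, size, mtime)
    # tuple keyed by filename, per the ent[0..3] indexing below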
879 for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
879 for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
880 if ent[3] == -1:
880 if ent[3] == -1:
881 timestr = b'unset '
881 timestr = b'unset '
882 elif nodates:
882 elif nodates:
883 timestr = b'set '
883 timestr = b'set '
884 else:
884 else:
885 timestr = time.strftime(
885 timestr = time.strftime(
886 "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
886 "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
887 )
887 )
888 timestr = encoding.strtolocal(timestr)
888 timestr = encoding.strtolocal(timestr)
889 if ent[1] & 0o20000:
889 if ent[1] & 0o20000:
890 mode = b'lnk'
890 mode = b'lnk'
891 else:
891 else:
892 mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
892 mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
893 ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
893 ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
894 for f in repo.dirstate.copies():
894 for f in repo.dirstate.copies():
895 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
895 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
896
896
897
897
898 @command(
898 @command(
899 b'debugdiscovery',
899 b'debugdiscovery',
900 [
900 [
901 (b'', b'old', None, _(b'use old-style discovery')),
901 (b'', b'old', None, _(b'use old-style discovery')),
902 (
902 (
903 b'',
903 b'',
904 b'nonheads',
904 b'nonheads',
905 None,
905 None,
906 _(b'use old-style discovery with non-heads included'),
906 _(b'use old-style discovery with non-heads included'),
907 ),
907 ),
908 (b'', b'rev', [], b'restrict discovery to this set of revs'),
908 (b'', b'rev', [], b'restrict discovery to this set of revs'),
909 (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
909 (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
910 ]
910 ]
911 + cmdutil.remoteopts,
911 + cmdutil.remoteopts,
912 _(b'[--rev REV] [OTHER]'),
912 _(b'[--rev REV] [OTHER]'),
913 )
913 )
914 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
914 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
915 """runs the changeset discovery protocol in isolation"""
915 """runs the changeset discovery protocol in isolation"""
916 opts = pycompat.byteskwargs(opts)
916 opts = pycompat.byteskwargs(opts)
917 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
917 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
918 remote = hg.peer(repo, opts, remoteurl)
918 remote = hg.peer(repo, opts, remoteurl)
919 ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))
919 ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))
920
920
921 # make sure tests are repeatable
921 # make sure tests are repeatable
922 random.seed(int(opts[b'seed']))
922 random.seed(int(opts[b'seed']))
923
923
924 if opts.get(b'old'):
924 if opts.get(b'old'):
925
925
926 def doit(pushedrevs, remoteheads, remote=remote):
926 def doit(pushedrevs, remoteheads, remote=remote):
927 if not util.safehasattr(remote, b'branches'):
927 if not util.safehasattr(remote, b'branches'):
928 # enable in-client legacy support
928 # enable in-client legacy support
929 remote = localrepo.locallegacypeer(remote.local())
929 remote = localrepo.locallegacypeer(remote.local())
930 common, _in, hds = treediscovery.findcommonincoming(
930 common, _in, hds = treediscovery.findcommonincoming(
931 repo, remote, force=True
931 repo, remote, force=True
932 )
932 )
933 common = set(common)
933 common = set(common)
934 if not opts.get(b'nonheads'):
934 if not opts.get(b'nonheads'):
935 ui.writenoi18n(
935 ui.writenoi18n(
936 b"unpruned common: %s\n"
936 b"unpruned common: %s\n"
937 % b" ".join(sorted(short(n) for n in common))
937 % b" ".join(sorted(short(n) for n in common))
938 )
938 )
939
939
940 clnode = repo.changelog.node
940 clnode = repo.changelog.node
941 common = repo.revs(b'heads(::%ln)', common)
941 common = repo.revs(b'heads(::%ln)', common)
942 common = {clnode(r) for r in common}
942 common = {clnode(r) for r in common}
943 return common, hds
943 return common, hds
944
944
945 else:
945 else:
946
946
947 def doit(pushedrevs, remoteheads, remote=remote):
947 def doit(pushedrevs, remoteheads, remote=remote):
948 nodes = None
948 nodes = None
949 if pushedrevs:
949 if pushedrevs:
950 revs = scmutil.revrange(repo, pushedrevs)
950 revs = scmutil.revrange(repo, pushedrevs)
951 nodes = [repo[r].node() for r in revs]
951 nodes = [repo[r].node() for r in revs]
952 common, any, hds = setdiscovery.findcommonheads(
952 common, any, hds = setdiscovery.findcommonheads(
953 ui, repo, remote, ancestorsof=nodes
            ui, repo, remote, ancestorsof=nodes
        )
        return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts[b'rev']
    with util.timedcm('debug-discovery') as t:
        common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    common = set(common)
    rheads = set(hds)
    lheads = set(repo.heads())

    data = {}
    data[b'elapsed'] = t.elapsed
    data[b'nb-common'] = len(common)
    data[b'nb-common-local'] = len(common & lheads)
    data[b'nb-common-remote'] = len(common & rheads)
    data[b'nb-common-both'] = len(common & rheads & lheads)
    data[b'nb-local'] = len(lheads)
    data[b'nb-local-missing'] = data[b'nb-local'] - data[b'nb-common-local']
    data[b'nb-remote'] = len(rheads)
    data[b'nb-remote-unknown'] = data[b'nb-remote'] - data[b'nb-common-remote']
    data[b'nb-revs'] = len(repo.revs(b'all()'))
    data[b'nb-revs-common'] = len(repo.revs(b'::%ln', common))
    data[b'nb-revs-missing'] = data[b'nb-revs'] - data[b'nb-revs-common']

    # display discovery summary
    ui.writenoi18n(b"elapsed time:  %(elapsed)f seconds\n" % data)
    ui.writenoi18n(b"heads summary:\n")
    ui.writenoi18n(b"  total common heads:  %(nb-common)9d\n" % data)
    ui.writenoi18n(b"    also local heads:  %(nb-common-local)9d\n" % data)
    ui.writenoi18n(b"    also remote heads: %(nb-common-remote)9d\n" % data)
    ui.writenoi18n(b"    both:              %(nb-common-both)9d\n" % data)
    ui.writenoi18n(b"  local heads:         %(nb-local)9d\n" % data)
    ui.writenoi18n(b"    common:            %(nb-common-local)9d\n" % data)
    ui.writenoi18n(b"    missing:           %(nb-local-missing)9d\n" % data)
    ui.writenoi18n(b"  remote heads:        %(nb-remote)9d\n" % data)
    ui.writenoi18n(b"    common:            %(nb-common-remote)9d\n" % data)
    ui.writenoi18n(b"    unknown:           %(nb-remote-unknown)9d\n" % data)
    ui.writenoi18n(b"local changesets:      %(nb-revs)9d\n" % data)
    ui.writenoi18n(b"  common:              %(nb-revs-common)9d\n" % data)
    ui.writenoi18n(b"  missing:             %(nb-revs-missing)9d\n" % data)

    if ui.verbose:
        ui.writenoi18n(
            b"common heads: %s\n" % b" ".join(sorted(short(n) for n in common))
        )


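# A minimal sketch (with made-up node names, not Mercurial objects) of the
# set arithmetic behind the summary above: every "nb-*" statistic derives
# from the common set and the two head sets alone.
def _sketch_discovery_stats():
    common = {b'n1', b'n2'}
    lheads = {b'n2', b'n3'}  # local heads
    rheads = {b'n1', b'n4'}  # remote heads
    nb_common_local = len(common & lheads)  # 1
    nb_local_missing = len(lheads) - nb_common_local  # 1
    nb_remote_unknown = len(rheads) - len(common & rheads)  # 1
    return nb_common_local, nb_local_missing, nb_remote_unknown

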
_chunksize = 4 << 10


@command(
    b'debugdownload', [(b'o', b'output', b'', _(b'path')),], optionalrepo=True
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config"""
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        dest = open(output, b"wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        if output:
            dest.close()


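# The download loop above is the standard fixed-size chunked-copy idiom. A
# self-contained sketch of the same pattern over ordinary files (the paths
# are placeholders):
def _sketch_chunked_copy(srcpath, dstpath, chunksize=4 << 10):
    with open(srcpath, 'rb') as src, open(dstpath, 'wb') as dst:
        data = src.read(chunksize)
        while data:
            dst.write(data)
            data = src.read(chunksize)

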
@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = None

        if util.safehasattr(extmod, '__file__'):
            extsource = pycompat.fsencode(extmod.__file__)
        elif getattr(sys, 'oxidized', False):
            extsource = pycompat.sysexecutable
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', b'').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', extname)
        else:
            fm.write(b'name', b'%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain(b'\n')
            elif not exttestedwith:
                fm.plain(_(b' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(b' (%s!)\n' % lasttestedversion)

        fm.condwrite(
            ui.verbose and extsource,
            b'source',
            _(b'  location: %s\n'),
            extsource or b"",
        )

        if ui.verbose:
            fm.plain(_(b'  bundled: %s\n') % [b'no', b'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(
            ui.verbose and exttestedwith,
            b'testedwith',
            _(b'  tested with: %s\n'),
            fm.formatlist(exttestedwith, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and extbuglink,
            b'buglink',
            _(b'  bug reporting: %s\n'),
            extbuglink or b"",
        )

    fm.end()


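# A small sketch of how the `testedwith` metadata is interpreted above: a
# space-separated list of versions, where only the last (most recent) entry
# is reported when the running version is absent. The version values here
# are made up.
def _sketch_testedwith(hgver=b'5.4', testedwith=b'5.2 5.3'):
    versions = testedwith.split()
    if hgver in versions:
        return None  # up to date, nothing to flag
    if not versions:
        return b'untested!'
    return b'%s!' % versions[-1]  # e.g. b'5.3!'

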
@command(
    b'debugfileset',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'apply the filespec on this revision'),
            _(b'REV'),
        ),
        (
            b'',
            b'all-files',
            False,
            _(b'test files from all revisions and working directory'),
        ),
        (
            b's',
            b'show-matcher',
            None,
            _(b'print internal representation of matcher'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
    ],
    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset

    fileset.symbols  # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)

    stages = [
        (b'parsed', pycompat.identity),
        (b'analyzed', filesetlang.analyze),
        (b'optimized', filesetlang.optimize),
    ]
    stagenames = set(n for n, f in stages)

    showalways = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(filesetlang.prettyformat(tree), b"\n")

    files = set()
    if opts[b'all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts[b'all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(
            repo.dirstate.walk(
                scmutil.matchall(repo),
                subrepos=list(wctx.substate),
                unknown=True,
                ignored=True,
            )
        )
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(repo.getcwd(), expr)
    if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write(b"%s\n" % f)


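# The stage loop above is a plain pipeline: each stage consumes the tree the
# previous one produced, and intermediate trees are printed on request. A
# generic sketch of the same shape, with identity stand-ins for the real
# filesetlang passes:
def _sketch_stage_pipeline(tree):
    stages = [
        (b'parsed', lambda t: t),  # stands in for the parse result
        (b'analyzed', lambda t: t),  # stands in for filesetlang.analyze
        (b'optimized', lambda t: t),  # stands in for filesetlang.optimize
    ]
    for name, fn in stages:
        tree = fn(tree)
    return tree

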
@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about the current config value and
    the Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len(b'format-variant'), maxvariantlength)

    def makeformatname(name):
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))

    fm = ui.formatter(b'debugformat', opts)
    if fm.isplain():

        def formatvalue(value):
            if util.safehasattr(value, b'startswith'):
                return value
            if value:
                return b'yes'
            else:
                return b'no'

    else:
        formatvalue = pycompat.identity

    fm.plain(b'format-variant')
    fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
    fm.plain(b' repo')
    if ui.verbose:
        fm.plain(b' config default')
    fm.plain(b'\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        if repovalue != configvalue:
            namelabel = b'formatvariant.name.mismatchconfig'
            repolabel = b'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = b'formatvariant.name.mismatchdefault'
            repolabel = b'formatvariant.repo.mismatchdefault'
        else:
            namelabel = b'formatvariant.name.uptodate'
            repolabel = b'formatvariant.repo.uptodate'

        fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
        fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
        if fv.default != configvalue:
            configlabel = b'formatvariant.config.special'
        else:
            configlabel = b'formatvariant.config.default'
        fm.condwrite(
            ui.verbose,
            b'config',
            b' %6s',
            formatvalue(configvalue),
            label=configlabel,
        )
        fm.condwrite(
            ui.verbose,
            b'default',
            b' %7s',
            formatvalue(fv.default),
            label=b'formatvariant.default',
        )
        fm.plain(b'\n')
    fm.end()


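# makeformatname() above pads each row label to the widest variant name so
# the value columns line up. The same padding, sketched with hypothetical
# variant names:
def _sketch_padded_labels(names=(b'fncache', b'dotencode', b'generaldelta')):
    width = max(len(n) for n in names)
    return [b'%s:%s' % (n, b' ' * (width - len(n))) for n in names]

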
@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""
    ui.writenoi18n(b'path: %s\n' % path)
    ui.writenoi18n(
        b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
    )
    ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
    ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
    ui.writenoi18n(
        b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
    )
    ui.writenoi18n(
        b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
    )
    casesensitive = b'(unknown)'
    try:
        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
    except OSError:
        pass
    ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)


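# Illustrative output of the command above (values vary by platform and
# filesystem; this is not captured from a real run):
#
#   path: .
#   mounted on: /home
#   exec: yes
#   fstype: ext4
#   symlink: yes
#   hardlink: yes
#   case-sensitive: yes

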
@command(
    b'debuggetbundle',
    [
        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
        (
            b't',
            b'type',
            b'bzip2',
            _(b'bundle compression type to use'),
            _(b'TYPE'),
        ),
    ],
    _(b'REPO FILE [-H|-C ID]...'),
    norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'getbundle'):
        raise error.Abort(b"getbundle() not supported by target repository")
    args = {}
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args['bundlecaps'] = None
    bundle = repo.getbundle(b'debug', **args)

    bundletype = opts.get(b'type', b'bzip2').lower()
    btypes = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_(b'unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)


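# The --type handling above is a lookup-then-validate pattern: map the
# user-facing name to an internal bundle type, then reject anything the
# bundle2 machinery does not know. A self-contained sketch using the same
# table:
def _sketch_bundletype(name=b'gzip'):
    btypes = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }
    internal = btypes.get(name.lower())
    if internal is None:
        raise ValueError('unknown bundle type: %r' % name)
    return internal

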
@command(b'debugignore', [], b'[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space-separated file names, show whether each file is ignored and,
    if so, the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write(b"%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != b'.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    for p in pathutil.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_(b"%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(
                        _(
                            b"%s is ignored because of "
                            b"containing directory %s\n"
                        )
                        % (uipathfn(f), ignored)
                    )
                ignorefile, lineno, line = ignoredata
                ui.write(
                    _(b"(ignore rule in %s, line %d: '%s')\n")
                    % (ignorefile, lineno, line)
                )
            else:
                ui.write(_(b"%s is not ignored\n") % uipathfn(f))


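# When a file is not matched directly, the loop above walks its ancestor
# directories (deepest first) and stops at the first ignored one. A
# stand-alone sketch of that walk, with toy stand-ins for dirstate._ignore
# and pathutil.finddirs:
def _sketch_ignored_parent(path=b'build/tmp/out.o'):
    def toy_ignore(p):
        return p == b'build'  # pretend only "build" is ignored

    def toy_finddirs(p):
        # yields b'build/tmp', then b'build', like pathutil.finddirs
        while b'/' in p:
            p = p.rsplit(b'/', 1)[0]
            yield p

    for parent in toy_finddirs(path):
        if toy_ignore(parent):
            return parent  # -> b'build'
    return None

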
@command(
    b'debugindex',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindex', file_, opts)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break

    fm = ui.formatter(b'debugindex', opts)
    fm.plain(
        b'   rev linkrev %s %s p2\n'
        % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
    )

    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
        fm.write(b'node', b'%s ', shortfn(node))
        fm.write(b'p1', b'%s ', shortfn(parents[0]))
        fm.write(b'p2', b'%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()


@command(
    b'debugindexdot',
    cmdutil.debugrevlogopts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
    ui.writenoi18n(b"digraph G {\n")
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != nullid:
            ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write(b"}\n")


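# For a three-revision linear history, the output of the command above looks
# something like this (rev -1 is the null revision):
#
#   digraph G {
#           -1 -> 0
#           0 -> 1
#           1 -> 2
#   }

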
@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    repo.changelog.shortest(nullid, 1)
    index = repo.changelog.index
    if not util.safehasattr(index, b'stats'):
        raise error.Abort(_(b'debugindexstats only works with native code'))
    for k, v in sorted(index.stats().items()):
        ui.write(b'%s: %d\n' % (k, v))


@command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    problems = 0

    fm = ui.formatter(b'debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write(b'encoding', _(b"checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(
        err,
        b'encodingerror',
        _(b" %s\n (check that your locale is properly set)\n"),
        err,
    )

    # Python
    pythonlib = None
    if util.safehasattr(os, '__file__'):
        pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
    elif getattr(sys, 'oxidized', False):
        pythonlib = pycompat.sysexecutable

    fm.write(
        b'pythonexe',
        _(b"checking Python executable (%s)\n"),
        pycompat.sysexecutable or _(b"unknown"),
    )
    fm.write(
        b'pythonimplementation',
        _(b"checking Python implementation (%s)\n"),
        pycompat.sysbytes(platform.python_implementation()),
    )
    fm.write(
        b'pythonver',
        _(b"checking Python version (%s)\n"),
        (b"%d.%d.%d" % sys.version_info[:3]),
    )
    fm.write(
        b'pythonlib',
        _(b"checking Python lib (%s)...\n"),
        pythonlib or _(b"unknown"),
    )

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add(b'sni')

    fm.write(
        b'pythonsecurity',
        _(b"checking Python security support (%s)\n"),
        fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
    )

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if b'tls1.2' not in security:
        fm.plain(
            _(
                b'  TLS 1.2 not supported by Python install; '
                b'network connections lack modern security\n'
            )
        )
    if b'sni' not in security:
        fm.plain(
            _(
                b'  SNI not supported by Python install; may have '
                b'connectivity issues with some servers\n'
            )
        )

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write(
        b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
    )
    fm.write(
        b'hgverextra',
        _(b"checking Mercurial custom build (%s)\n"),
        b'+'.join(hgver.split(b'+')[1:]),
    )

    # compiled modules
    hgmodules = None
    if util.safehasattr(sys.modules[__name__], '__file__'):
        hgmodules = os.path.dirname(pycompat.fsencode(__file__))
    elif getattr(sys, 'oxidized', False):
        hgmodules = pycompat.sysexecutable

    fm.write(
        b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
    )
    fm.write(
        b'hgmodules',
        _(b"checking installed modules (%s)...\n"),
        hgmodules or _(b"unknown"),
    )

    rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
    rustext = rustandc  # for now, that's the only case
    cext = policy.policy in (b'c', b'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        err = None
        try:
            if cext:
                from .cext import (  # pytype: disable=import-error
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )

                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (  # pytype: disable=import-error
                    ancestor,
                    dirstate,
                )

                dir(ancestor), dir(dirstate)  # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, b'extensionserror', b" %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write(
        b'compengines',
        _(b'checking registered compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    fm.write(
        b'compenginesavail',
        _(b'checking available compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines if e.available()),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE
    )
    fm.write(
        b'compenginesserver',
        _(
            b'checking available compression engines '
            b'for wire protocol (%s)\n'
        ),
        fm.formatlist(
            [e.name() for e in wirecompengines if e.wireprotosupport()],
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    re2 = b'missing'
    if util._re2:
        re2 = b'available'
    fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatepaths()
    fm.write(b'templatedirs', b'checking templates (%s)...\n', b' '.join(p))
    fm.condwrite(not p, b'', _(b" no template directories found\n"))
    if p:
        m = templater.templatepath(b"map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
        else:
            p = None
        fm.condwrite(
            p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
        )
        fm.condwrite(
            not m,
            b'defaulttemplatenotfound',
            _(b" template '%s' not found\n"),
            b"default",
        )
    if not p:
        problems += 1
    fm.condwrite(
        not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
    )

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    fm.condwrite(
        not cmdpath and editor == b'vi',
        b'vinotfound',
        _(
            b" No commit editor set and can't find %s in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editor == b'vi' and editorbin,
    )
    fm.condwrite(
        not cmdpath and editor != b'vi',
        b'editornotfound',
        _(
            b" Can't find editor '%s' in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editorbin,
    )
    if not cmdpath and editor != b'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = stringutil.forcebytestr(e)
        problems += 1

    fm.condwrite(
        username, b'username', _(b"checking username (%s)\n"), username
    )
    fm.condwrite(
        err,
        b'usernameerror',
        _(
            b"checking username...\n %s\n"
            b" (specify a username in your configuration file)\n"
        ),
        err,
    )

    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, b'', _(b"no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(
        problems,
        b'problems',
        _(b"%d problems detected, please check your install!\n"),
        problems,
    )
    fm.end()

    return problems


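# Extensions can hook into this check: the loop above calls any module-level
# `debuginstall(ui, fm)` an extension defines and adds its return value to
# the problem count. A minimal sketch of such a hook (the probed dependency
# and the output label are hypothetical):
def _sketch_extension_debuginstall(ui, fm):
    err = None  # e.g. set this after probing an extension dependency
    fm.condwrite(err, b'myexterror', b" %s\n", err)
    return 1 if err else 0

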
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))


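# The result line above maps each boolean to b"1"/b"0" and concatenates.
# The same transform applied to a sample flag list:
def _sketch_known_flags(flags=(True, False, True)):
    return b"".join([f and b"1" or b"0" for f in flags])  # -> b"101"

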
@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)


@command(
    b'debuglocks',
    [
        (b'L', b'force-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so they should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    if opts.get('force_lock'):
        repo.svfs.unlink(b'lock')
    if opts.get('force_wlock'):
        repo.vfs.unlink(b'wlock')
    if opts.get('force_lock') or opts.get('force_wlock'):
        return 0

    locks = []
    try:
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if b":" in locker:
                    host, pid = locker.split(b':')
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    return held


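# report() above uses the classic non-blocking probe: try to take the lock
# without waiting; success means it was free (release it immediately),
# failure means another process holds it. The same shape, sketched with a
# threading.Lock as a stand-in:
def _sketch_probe_lock():
    import threading

    lk = threading.Lock()
    if lk.acquire(False):  # non-blocking acquire
        lk.release()
        return b'free'
    return b'held'

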
@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _(
                b"Current revlog implementation doesn't appear to have a "
                b"manifest fulltext cache\n"
            )
            raise error.Abort(msg)

    if opts.get('clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
            return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(e, hint=b"Check your manifest node id")
                manifest.read()  # stores revision in cache too
            return

    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )


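# The listing above iterates with peek() so that inspection does not reorder
# the LRU entries. A sketch of the get-vs-peek distinction, using an
# OrderedDict as a stand-in for the cache:
def _sketch_lru_peek():
    from collections import OrderedDict

    lru = OrderedDict([(b'a', b'1'), (b'b', b'2')])

    def get(k):  # a "get" promotes the entry to most recently used
        lru.move_to_end(k)
        return lru[k]

    def peek(k):  # a "peek" reads without touching the order
        return lru[k]

    peek(b'a')
    return list(lru)  # still [b'a', b'b']

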
@command(b'debugmergestate', [], b'')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    def _hashornull(h):
        if h == nullhex:
            return b'null'
        else:
            return h

    def printrecords(version):
        ui.writenoi18n(b'* version %d records\n' % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == b'L':
                ui.writenoi18n(b'local: %s\n' % record)
            elif rtype == b'O':
                ui.writenoi18n(b'other: %s\n' % record)
            elif rtype == b'm':
                driver, mdstate = record.split(b'\0', 1)
                ui.writenoi18n(
                    b'merge driver: %s (state "%s")\n' % (driver, mdstate)
                )
            elif rtype in b'FDC':
                r = record.split(b'\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = b'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.writenoi18n(
                    b'file: %s (record type "%s", state "%s", hash %s)\n'
                    % (f, rtype, state, _hashornull(hash))
                )
                ui.writenoi18n(
                    b'  local path: %s (flags "%s")\n' % (lfile, flags)
                )
                ui.writenoi18n(
                    b'  ancestor path: %s (node %s)\n'
                    % (afile, _hashornull(anode))
                )
                ui.writenoi18n(
                    b'  other path: %s (node %s)\n'
                    % (ofile, _hashornull(onode))
                )
            elif rtype == b'f':
                filename, rawextras = record.split(b'\0', 1)
                extras = rawextras.split(b'\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append(b'%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.writenoi18n(
                    b'file extras: %s (%s)\n'
                    % (filename, b', '.join(extrastrings))
                )
            elif rtype == b'l':
                labels = record.split(b'\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.writenoi18n(b'labels:\n')
                ui.write((b'  local: %s\n' % labels[0]))
                ui.write((b'  other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write((b'  base: %s\n' % labels[2]))
            else:
                ui.writenoi18n(
                    b'unrecognized entry: %s\t%s\n'
                    % (rtype, record.replace(b'\0', b'\t'))
                )

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since
    # this command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = b'LOml'

    def key(r):
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)

    v1records.sort(key=key)
    v2records.sort(key=key)

2048 if not v1records and not v2records:
2048 if not v1records and not v2records:
2049 ui.writenoi18n(b'no merge state found\n')
2049 ui.writenoi18n(b'no merge state found\n')
2050 elif not v2records:
2050 elif not v2records:
2051 ui.notenoi18n(b'no version 2 merge state\n')
2051 ui.notenoi18n(b'no version 2 merge state\n')
2052 printrecords(1)
2052 printrecords(1)
2053 elif ms._v1v2match(v1records, v2records):
2053 elif ms._v1v2match(v1records, v2records):
2054 ui.notenoi18n(b'v1 and v2 states match: using v2\n')
2054 ui.notenoi18n(b'v1 and v2 states match: using v2\n')
2055 printrecords(2)
2055 printrecords(2)
2056 else:
2056 else:
2057 ui.notenoi18n(b'v1 and v2 states mismatch: using v1\n')
2057 ui.notenoi18n(b'v1 and v2 states mismatch: using v1\n')
2058 printrecords(1)
2058 printrecords(1)
2059 if ui.verbose:
2059 if ui.verbose:
2060 printrecords(2)
2060 printrecords(2)
2061
2061
2062
2062
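# A minimal sketch of what the record printing in debugmergestate above
# produces for a hypothetical unresolved merge of a file "foo" (node hashes
# abbreviated to <...> placeholders here):
#
#   $ hg debugmergestate
#   * version 2 records
#   local: <p1 node>
#   other: <p2 node>
#   file: foo (record type "F", state "u", hash <filenode>)
#    local path: foo (flags "")
#    ancestor path: foo (node <node>)
#    other path: foo (node <node>)
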
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in pycompat.iteritems(repo.names):
        if name != b'branches':
            names.update(ns.listnames(repo))
    names.update(
        tag
        for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
        if not closed
    )
    completions = set()
    if not args:
        args = [b'']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write(b'\n'.join(sorted(completions)))
    ui.write(b'\n')


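# Illustrative usage of debugnamecomplete, assuming a hypothetical
# repository with an open branch "default", a bookmark "dev" and a tag
# "v1.0"; completion is a simple prefix match over all of those names:
#
#   $ hg debugnamecomplete d
#   default
#   dev
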
@command(
    b'debugnodemap',
    [
        (
            b'',
            b'dump-new',
            False,
            _(b'write a (new) persistent binary nodemap on stdout'),
        ),
        (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
        (
            b'',
            b'check',
            False,
            _(b'check that the data on disk are correct.'),
        ),
        (
            b'',
            b'metadata',
            False,
            _(b'display the on-disk metadata for the nodemap'),
        ),
    ],
)
def debugnodemap(ui, repo, **opts):
    """write and inspect on disk nodemap"""
    if opts['dump_new']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        data = nodemap.persistent_data(cl.index)
        ui.write(data)
    elif opts['dump_disk']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write(data)
    elif opts['check']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            return nodemap.check_data(ui, cl.index, data)
    elif opts['metadata']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write((b"uid: %s\n") % docket.uid)
            ui.write((b"tip-rev: %d\n") % docket.tip_rev)


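# Illustrative usage of the --metadata branch above, assuming a repository
# that already has a persistent nodemap on disk (the uid and tip-rev values
# below are hypothetical):
#
#   $ hg debugnodemap --metadata
#   uid: 4ac4c127
#   tip-rev: 5001
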
@command(
    b'debugobsolete',
    [
        (b'', b'flags', 0, _(b'markers flag')),
        (
            b'',
            b'record-parents',
            False,
            _(b'record parent information for the precursor'),
        ),
        (b'r', b'rev', [], _(b'display markers relevant to REV')),
        (
            b'',
            b'exclusive',
            False,
            _(b'restrict display to markers only relevant to REV'),
        ),
        (b'', b'index', False, _(b'display index of the marker')),
        (b'', b'delete', [], _(b'delete markers specified by indices')),
    ]
    + cmdutil.commitopts2
    + cmdutil.formatteropts,
    _(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort(
                b'changeset references must be full hexadecimal '
                b'node identifiers'
            )

    if opts.get(b'delete'):
        indices = []
        for v in opts.get(b'delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(
                    _(b'invalid index value: %r') % v,
                    hint=_(b'use integers for indices'),
                )

        if repo.currenttransaction():
            raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of a transaction.')
            )

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_(b'deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        if opts[b'rev']:
            raise error.Abort(b'cannot select revision when creating marker')
        metadata = {}
        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction(b'debugobsolete')
            try:
                date = opts.get(b'date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts[b'record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort(
                            b'cannot use --record-parents on '
                            b'unknown changesets'
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts[b'flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % pycompat.bytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        if opts[b'rev']:
            revs = scmutil.revrange(repo, opts[b'rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts[b'exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get(b'rev') and opts.get(b'index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get(b'index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()


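# Illustrative usage of debugobsolete, assuming OLD and NEW are
# hypothetical full 40-digit hexadecimal node ids:
#
#   $ hg debugobsolete $OLD $NEW     # record that OLD was rewritten as NEW
#   $ hg debugobsolete --index       # list all markers with their indices
#   $ hg debugobsolete --delete 0    # remove the marker at index 0
#   deleted 1 obsolescence markers
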
@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p1copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p2copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


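# Illustrative usage of the two copy-tracing commands above, assuming a
# hypothetical working directory in which "a" was copied to "b" since the
# first parent (debugp2copies only reports anything for merges):
#
#   $ hg debugp1copies
#   a -> b
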
@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        spec = spec[len(rootdir) :]
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in pycompat.iteritems(dirstate):
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')


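# Illustrative completion session for debugpathcomplete, assuming
# hypothetical tracked files "foo/bar.txt" and "foo/baz.txt"; without
# --full only the next path segment is completed:
#
#   $ hg debugpathcomplete fo
#   foo
#   $ hg debugpathcomplete --full fo
#   foo/bar.txt
#   foo/baz.txt
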
@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    ctx1 = scmutil.revsingle(repo, rev1)
    ctx2 = scmutil.revsingle(repo, rev2)
    m = scmutil.match(ctx1, pats, opts)
    for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
        ui.write(b'%s -> %s\n' % (src, dst))


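# Illustrative usage of debugpathcopies, assuming a hypothetical history in
# which "old.py" was renamed to "new.py" between revisions 1 and 2:
#
#   $ hg debugpathcopies 1 2
#   old.py -> new.py
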
@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging. Requires --debug to display
    # though.
    overrides = {
        (b'devel', b'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        local = peer.local() is not None
        canpush = peer.canpush()

        ui.write(_(b'url: %s\n') % peer.url())
        ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
        ui.write(_(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no')))


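# Illustrative output of debugpeer for a hypothetical local peer; the exact
# URL form depends on how hg.peer() resolves the given path:
#
#   $ hg debugpeer /path/to/repo
#   url: /path/to/repo
#   local: yes
#   pushable: yes
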
@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for the specified file

    As described in :hg:`help merge-tools`, Mercurial examines the
    configurations below in this order to decide which merge tool is
    chosen for the specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reasons only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out the examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actually updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases the amount of output per file
    according to configurations in hgrc.

    With -v/--verbose, this command shows the configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If a merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such a case, the information
    above is useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts[b'tool']:
        overrides[(b'ui', b'forcemerge')] = opts[b'tool']
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        hgmerge = encoding.environ.get(b"HGMERGE")
        if hgmerge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config(b"ui", b"merge")
        if uimerge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts[b'changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(b'%s = %s\n' % (path, tool))


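# Illustrative usage of debugpickmergetool, assuming a hypothetical tracked
# file "f" and the internal :merge3 tool forced via --tool:
#
#   $ hg debugpickmergetool --tool :merge3 f
#   f = :merge3
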
@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if keyinfo:
        key, old, new = keyinfo
        with target.commandexecutor() as e:
            r = e.callcommand(
                b'pushkey',
                {
                    b'namespace': namespace,
                    b'key': key,
                    b'old': old,
                    b'new': new,
                },
            ).result()

        ui.status(pycompat.bytestr(r) + b'\n')
        return not r
    else:
        for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
            ui.write(
                b"%s\t%s\n" % (stringutil.escapestr(k), stringutil.escapestr(v))
            )


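# Illustrative usage of debugpushkey against a hypothetical repository
# path; two args list a namespace (keys and values tab-separated), five
# args attempt an update and print the boolean result:
#
#   $ hg debugpushkey /path/to/repo bookmarks
#   dev    <node hex>
#   $ hg debugpushkey /path/to/repo bookmarks dev $OLDNODE $NEWNODE
#   True
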
@command(b'debugpvec', [], _(b'A B'))
def debugpvec(ui, repo, a, b=None):
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = b"="
    elif pa > pb:
        rel = b">"
    elif pa < pb:
        rel = b"<"
    elif pa | pb:
        rel = b"|"
    ui.write(_(b"a: %s\n") % pa)
    ui.write(_(b"b: %s\n") % pb)
    ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(
        _(b"delta: %d hdist: %d distance: %d relation: %s\n")
        % (
            abs(pa._depth - pb._depth),
            pvec._hamming(pa._vec, pb._vec),
            pa.distance(pb),
            rel,
        )
    )


@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look for the given revision

    If no revision is specified, the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = set(f for f in dsonly if dirstate[f] != b'a')
            changedfiles = manifestonly | dsnotadded

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)


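# Illustrative usage of debugrebuilddirstate: reset the dirstate to the
# first parent of the working directory, or only repair inconsistent
# entries; neither form prints anything on success:
#
#   $ hg debugrebuilddirstate
#   $ hg debugrebuilddirstate --minimal
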
@command(b'debugrebuildfncache', [], b'')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    repair.rebuildfncache(ui, repo)


@command(
    b'debugrename',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    m = scmutil.match(ctx, pats, opts)
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        o = fctx.filelog().renamed(fctx.filenode())
        rel = repo.pathto(abs)
        if o:
            ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
        else:
            ui.write(_(b"%s not renamed\n") % rel)


2650 @command(
2651 @command(
2651 b'debugrevlog',
2652 b'debugrevlog',
2652 cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
2653 cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
2653 _(b'-c|-m|FILE'),
2654 _(b'-c|-m|FILE'),
2654 optionalrepo=True,
2655 optionalrepo=True,
2655 )
2656 )
2656 def debugrevlog(ui, repo, file_=None, **opts):
2657 def debugrevlog(ui, repo, file_=None, **opts):
2657 """show data and statistics about a revlog"""
2658 """show data and statistics about a revlog"""
2658 opts = pycompat.byteskwargs(opts)
2659 opts = pycompat.byteskwargs(opts)
2659 r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
2660 r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
2660
2661
2661 if opts.get(b"dump"):
2662 if opts.get(b"dump"):
2662 numrevs = len(r)
2663 numrevs = len(r)
2663 ui.write(
2664 ui.write(
2664 (
2665 (
2665 b"# rev p1rev p2rev start end deltastart base p1 p2"
2666 b"# rev p1rev p2rev start end deltastart base p1 p2"
2666 b" rawsize totalsize compression heads chainlen\n"
2667 b" rawsize totalsize compression heads chainlen\n"
2667 )
2668 )
2668 )
2669 )
2669 ts = 0
2670 ts = 0
2670 heads = set()
2671 heads = set()
2671
2672
2672 for rev in pycompat.xrange(numrevs):
2673 for rev in pycompat.xrange(numrevs):
2673 dbase = r.deltaparent(rev)
2674 dbase = r.deltaparent(rev)
2674 if dbase == -1:
2675 if dbase == -1:
2675 dbase = rev
2676 dbase = rev
2676 cbase = r.chainbase(rev)
2677 cbase = r.chainbase(rev)
2677 clen = r.chainlen(rev)
2678 clen = r.chainlen(rev)
2678 p1, p2 = r.parentrevs(rev)
2679 p1, p2 = r.parentrevs(rev)
2679 rs = r.rawsize(rev)
2680 rs = r.rawsize(rev)
2680 ts = ts + rs
2681 ts = ts + rs
2681 heads -= set(r.parentrevs(rev))
2682 heads -= set(r.parentrevs(rev))
2682 heads.add(rev)
2683 heads.add(rev)
2683 try:
2684 try:
2684 compression = ts / r.end(rev)
2685 compression = ts / r.end(rev)
2685 except ZeroDivisionError:
2686 except ZeroDivisionError:
2686 compression = 0
2687 compression = 0
2687 ui.write(
2688 ui.write(
2688 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
2689 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
2689 b"%11d %5d %8d\n"
2690 b"%11d %5d %8d\n"
2690 % (
2691 % (
2691 rev,
2692 rev,
2692 p1,
2693 p1,
2693 p2,
2694 p2,
2694 r.start(rev),
2695 r.start(rev),
2695 r.end(rev),
2696 r.end(rev),
2696 r.start(dbase),
2697 r.start(dbase),
2697 r.start(cbase),
2698 r.start(cbase),
2698 r.start(p1),
2699 r.start(p1),
2699 r.start(p2),
2700 r.start(p2),
2700 rs,
2701 rs,
2701 ts,
2702 ts,
2702 compression,
2703 compression,
2703 len(heads),
2704 len(heads),
2704 clen,
2705 clen,
2705 )
2706 )
2706 )
2707 )
2707 return 0
2708 return 0
2708
2709
2709 v = r.version
2710 v = r.version
2710 format = v & 0xFFFF
2711 format = v & 0xFFFF
2711 flags = []
2712 flags = []
2712 gdelta = False
2713 gdelta = False
2713 if v & revlog.FLAG_INLINE_DATA:
2714 if v & revlog.FLAG_INLINE_DATA:
2714 flags.append(b'inline')
2715 flags.append(b'inline')
2715 if v & revlog.FLAG_GENERALDELTA:
2716 if v & revlog.FLAG_GENERALDELTA:
2716 gdelta = True
2717 gdelta = True
2717 flags.append(b'generaldelta')
2718 flags.append(b'generaldelta')
2718 if not flags:
2719 if not flags:
2719 flags = [b'(none)']
2720 flags = [b'(none)']
2720
2721
2721 ### tracks merge vs single parent
2722 ### tracks merge vs single parent
2722 nummerges = 0
2723 nummerges = 0
2723
2724
2724 ### tracks ways the "delta" are build
2725 ### tracks ways the "delta" are build
2725 # nodelta
2726 # nodelta
2726 numempty = 0
2727 numempty = 0
2727 numemptytext = 0
2728 numemptytext = 0
2728 numemptydelta = 0
2729 numemptydelta = 0
2729 # full file content
2730 # full file content
2730 numfull = 0
2731 numfull = 0
2731 # intermediate snapshot against a prior snapshot
2732 # intermediate snapshot against a prior snapshot
2732 numsemi = 0
2733 numsemi = 0
2733 # snapshot count per depth
2734 # snapshot count per depth
2734 numsnapdepth = collections.defaultdict(lambda: 0)
2735 numsnapdepth = collections.defaultdict(lambda: 0)
2735 # delta against previous revision
2736 # delta against previous revision
2736 numprev = 0
2737 numprev = 0
2737 # delta against first or second parent (not prev)
2738 # delta against first or second parent (not prev)
2738 nump1 = 0
2739 nump1 = 0
2739 nump2 = 0
2740 nump2 = 0
2740 # delta against neither prev nor parents
2741 # delta against neither prev nor parents
2741 numother = 0
2742 numother = 0
2742 # delta against prev that are also first or second parent
2743 # delta against prev that are also first or second parent
2743 # (details of `numprev`)
2744 # (details of `numprev`)
2744 nump1prev = 0
2745 nump1prev = 0
2745 nump2prev = 0
2746 nump2prev = 0
2746
2747
2747 # data about delta chain of each revs
2748 # data about delta chain of each revs
2748 chainlengths = []
2749 chainlengths = []
2749 chainbases = []
2750 chainbases = []
2750 chainspans = []
2751 chainspans = []
2751
2752
2752 # data about each revision
2753 # data about each revision
2753 datasize = [None, 0, 0]
2754 datasize = [None, 0, 0]
2754 fullsize = [None, 0, 0]
2755 fullsize = [None, 0, 0]
2755 semisize = [None, 0, 0]
2756 semisize = [None, 0, 0]
2756 # snapshot count per depth
2757 # snapshot count per depth
2757 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
2758 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
2758 deltasize = [None, 0, 0]
2759 deltasize = [None, 0, 0]
2759 chunktypecounts = {}
2760 chunktypecounts = {}
2760 chunktypesizes = {}
2761 chunktypesizes = {}
2761
2762
2762 def addsize(size, l):
2763 def addsize(size, l):
2763 if l[0] is None or size < l[0]:
2764 if l[0] is None or size < l[0]:
2764 l[0] = size
2765 l[0] = size
2765 if size > l[1]:
2766 if size > l[1]:
2766 l[1] = size
2767 l[1] = size
2767 l[2] += size
2768 l[2] += size
2768
2769
2769 numrevs = len(r)
2770 numrevs = len(r)
2770 for rev in pycompat.xrange(numrevs):
2771 for rev in pycompat.xrange(numrevs):
2771 p1, p2 = r.parentrevs(rev)
2772 p1, p2 = r.parentrevs(rev)
2772 delta = r.deltaparent(rev)
2773 delta = r.deltaparent(rev)
2773 if format > 0:
2774 if format > 0:
2774 addsize(r.rawsize(rev), datasize)
2775 addsize(r.rawsize(rev), datasize)
2775 if p2 != nullrev:
2776 if p2 != nullrev:
2776 nummerges += 1
2777 nummerges += 1
2777 size = r.length(rev)
2778 size = r.length(rev)
2778 if delta == nullrev:
2779 if delta == nullrev:
2779 chainlengths.append(0)
2780 chainlengths.append(0)
2780 chainbases.append(r.start(rev))
2781 chainbases.append(r.start(rev))
2781 chainspans.append(size)
2782 chainspans.append(size)
2782 if size == 0:
2783 if size == 0:
2783 numempty += 1
2784 numempty += 1
2784 numemptytext += 1
2785 numemptytext += 1
2785 else:
2786 else:
2786 numfull += 1
2787 numfull += 1
2787 numsnapdepth[0] += 1
2788 numsnapdepth[0] += 1
2788 addsize(size, fullsize)
2789 addsize(size, fullsize)
2789 addsize(size, snapsizedepth[0])
2790 addsize(size, snapsizedepth[0])
2790 else:
2791 else:
2791 chainlengths.append(chainlengths[delta] + 1)
2792 chainlengths.append(chainlengths[delta] + 1)
2792 baseaddr = chainbases[delta]
2793 baseaddr = chainbases[delta]
2793 revaddr = r.start(rev)
2794 revaddr = r.start(rev)
2794 chainbases.append(baseaddr)
2795 chainbases.append(baseaddr)
2795 chainspans.append((revaddr - baseaddr) + size)
2796 chainspans.append((revaddr - baseaddr) + size)
2796 if size == 0:
2797 if size == 0:
2797 numempty += 1
2798 numempty += 1
2798 numemptydelta += 1
2799 numemptydelta += 1
2799 elif r.issnapshot(rev):
2800 elif r.issnapshot(rev):
2800 addsize(size, semisize)
2801 addsize(size, semisize)
2801 numsemi += 1
2802 numsemi += 1
2802 depth = r.snapshotdepth(rev)
2803 depth = r.snapshotdepth(rev)
2803 numsnapdepth[depth] += 1
2804 numsnapdepth[depth] += 1
2804 addsize(size, snapsizedepth[depth])
2805 addsize(size, snapsizedepth[depth])
2805 else:
2806 else:
2806 addsize(size, deltasize)
2807 addsize(size, deltasize)
2807 if delta == rev - 1:
2808 if delta == rev - 1:
2808 numprev += 1
2809 numprev += 1
2809 if delta == p1:
2810 if delta == p1:
2810 nump1prev += 1
2811 nump1prev += 1
2811 elif delta == p2:
2812 elif delta == p2:
2812 nump2prev += 1
2813 nump2prev += 1
2813 elif delta == p1:
2814 elif delta == p1:
2814 nump1 += 1
2815 nump1 += 1
2815 elif delta == p2:
2816 elif delta == p2:
2816 nump2 += 1
2817 nump2 += 1
2817 elif delta != nullrev:
2818 elif delta != nullrev:
2818 numother += 1
2819 numother += 1
2819
2820
2820 # Obtain data on the raw chunks in the revlog.
2821 # Obtain data on the raw chunks in the revlog.
2821 if util.safehasattr(r, b'_getsegmentforrevs'):
2822 if util.safehasattr(r, b'_getsegmentforrevs'):
2822 segment = r._getsegmentforrevs(rev, rev)[1]
2823 segment = r._getsegmentforrevs(rev, rev)[1]
2823 else:
2824 else:
2824 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
2825 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
2825 if segment:
2826 if segment:
2826 chunktype = bytes(segment[0:1])
2827 chunktype = bytes(segment[0:1])
2827 else:
2828 else:
2828 chunktype = b'empty'
2829 chunktype = b'empty'
2829
2830
2830 if chunktype not in chunktypecounts:
2831 if chunktype not in chunktypecounts:
2831 chunktypecounts[chunktype] = 0
2832 chunktypecounts[chunktype] = 0
2832 chunktypesizes[chunktype] = 0
2833 chunktypesizes[chunktype] = 0
2833
2834
2834 chunktypecounts[chunktype] += 1
2835 chunktypecounts[chunktype] += 1
2835 chunktypesizes[chunktype] += size
2836 chunktypesizes[chunktype] += size
2836
2837
2837 # Adjust size min value for empty cases
2838 # Adjust size min value for empty cases
2838 for size in (datasize, fullsize, semisize, deltasize):
2839 for size in (datasize, fullsize, semisize, deltasize):
2839 if size[0] is None:
2840 if size[0] is None:
2840 size[0] = 0
2841 size[0] = 0
2841
2842
2842 numdeltas = numrevs - numfull - numempty - numsemi
2843 numdeltas = numrevs - numfull - numempty - numsemi
2843 numoprev = numprev - nump1prev - nump2prev
2844 numoprev = numprev - nump1prev - nump2prev
2844 totalrawsize = datasize[2]
2845 totalrawsize = datasize[2]
2845 datasize[2] /= numrevs
2846 datasize[2] /= numrevs
2846 fulltotal = fullsize[2]
2847 fulltotal = fullsize[2]
2847 if numfull == 0:
2848 if numfull == 0:
2848 fullsize[2] = 0
2849 fullsize[2] = 0
2849 else:
2850 else:
2850 fullsize[2] /= numfull
2851 fullsize[2] /= numfull
2851 semitotal = semisize[2]
2852 semitotal = semisize[2]
2852 snaptotal = {}
2853 snaptotal = {}
2853 if numsemi > 0:
2854 if numsemi > 0:
2854 semisize[2] /= numsemi
2855 semisize[2] /= numsemi
2855 for depth in snapsizedepth:
2856 for depth in snapsizedepth:
2856 snaptotal[depth] = snapsizedepth[depth][2]
2857 snaptotal[depth] = snapsizedepth[depth][2]
2857 snapsizedepth[depth][2] /= numsnapdepth[depth]
2858 snapsizedepth[depth][2] /= numsnapdepth[depth]
2858
2859
2859 deltatotal = deltasize[2]
2860 deltatotal = deltasize[2]
2860 if numdeltas > 0:
2861 if numdeltas > 0:
2861 deltasize[2] /= numdeltas
2862 deltasize[2] /= numdeltas
2862 totalsize = fulltotal + semitotal + deltatotal
2863 totalsize = fulltotal + semitotal + deltatotal
2863 avgchainlen = sum(chainlengths) / numrevs
2864 avgchainlen = sum(chainlengths) / numrevs
2864 maxchainlen = max(chainlengths)
2865 maxchainlen = max(chainlengths)
2865 maxchainspan = max(chainspans)
2866 maxchainspan = max(chainspans)
2866 compratio = 1
2867 compratio = 1
2867 if totalsize:
2868 if totalsize:
2868 compratio = totalrawsize / totalsize
2869 compratio = totalrawsize / totalsize
2869
2870
2870 basedfmtstr = b'%%%dd\n'
2871 basedfmtstr = b'%%%dd\n'
2871 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
2872 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
2872
2873
2873 def dfmtstr(max):
2874 def dfmtstr(max):
2874 return basedfmtstr % len(str(max))
2875 return basedfmtstr % len(str(max))
2875
2876
2876 def pcfmtstr(max, padding=0):
2877 def pcfmtstr(max, padding=0):
2877 return basepcfmtstr % (len(str(max)), b' ' * padding)
2878 return basepcfmtstr % (len(str(max)), b' ' * padding)
2878
2879
2879 def pcfmt(value, total):
2880 def pcfmt(value, total):
2880 if total:
2881 if total:
2881 return (value, 100 * float(value) / total)
2882 return (value, 100 * float(value) / total)
2882 else:
2883 else:
2883 return value, 100.0
2884 return value, 100.0
2884
2885
2885 ui.writenoi18n(b'format : %d\n' % format)
2886 ui.writenoi18n(b'format : %d\n' % format)
2886 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
2887 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
2887
2888
2888 ui.write(b'\n')
2889 ui.write(b'\n')
2889 fmt = pcfmtstr(totalsize)
2890 fmt = pcfmtstr(totalsize)
2890 fmt2 = dfmtstr(totalsize)
2891 fmt2 = dfmtstr(totalsize)
2891 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
2892 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
2892 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
2893 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
2893 ui.writenoi18n(
2894 ui.writenoi18n(
2894 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
2895 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
2895 )
2896 )
2896 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
2897 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
2897 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
2898 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
2898 ui.writenoi18n(
2899 ui.writenoi18n(
2899 b' text : '
2900 b' text : '
2900 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
2901 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
2901 )
2902 )
2902 ui.writenoi18n(
2903 ui.writenoi18n(
2903 b' delta : '
2904 b' delta : '
2904 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
2905 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
2905 )
2906 )
2906 ui.writenoi18n(
2907 ui.writenoi18n(
2907 b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
        b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b' lvl-%-3d : ' % depth)
            + fmt % pcfmt(numsnapdepth[depth], numrevs)
        )
    ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
    ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
    ui.writenoi18n(
        b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b' lvl-%-3d : ' % depth)
            + fmt % pcfmt(snaptotal[depth], totalsize)
        )
    ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        if chunktype == b'empty':
            return b' %s : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return b' 0x%s : ' % hex(chunktype)

    ui.write(b'\n')
    ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write(b'\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
    ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
    ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
    ui.writenoi18n(b'compression ratio : ' + fmt % compratio)

    if format > 0:
        ui.write(b'\n')
        ui.writenoi18n(
            b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
            % tuple(datasize)
        )
        ui.writenoi18n(
            b'full revision size (min/max/avg) : %d / %d / %d\n'
            % tuple(fullsize)
        )
        ui.writenoi18n(
            b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
            % tuple(semisize)
        )
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.writenoi18n(
                b' level-%-3d (min/max/avg) : %d / %d / %d\n'
                % ((depth,) + tuple(snapsizedepth[depth]))
            )
        ui.writenoi18n(
            b'delta size (min/max/avg) : %d / %d / %d\n'
            % tuple(deltasize)
        )

    if numdeltas > 0:
        ui.write(b'\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.writenoi18n(
            b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
        )
        if numprev > 0:
            ui.writenoi18n(
                b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
            )
            ui.writenoi18n(
                b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
            )
            ui.writenoi18n(
                b' other : ' + fmt2 % pcfmt(numoprev, numprev)
            )
        if gdelta:
            ui.writenoi18n(
                b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
            )


@command(
    b'debugrevlogindex',
    cmdutil.debugrevlogopts
    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
    _(b'[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
    format = opts.get(b'format', 0)
    if format not in (0, 1):
        raise error.Abort(_(b"unknown format %d") % format)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        if ui.verbose:
            ui.writenoi18n(
                b" rev offset length linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
        else:
            ui.writenoi18n(
                b" rev linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
    elif format == 1:
        if ui.verbose:
            ui.writenoi18n(
                (
                    b" rev flag offset length size link p1"
                    b" p2 %s\n"
                )
                % b"nodeid".rjust(idlen)
            )
        else:
            ui.writenoi18n(
                b" rev flag size link p1 p2 %s\n"
                % b"nodeid".rjust(idlen)
            )

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write(
                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
                    % (
                        i,
                        r.start(i),
                        r.length(i),
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
            else:
                ui.write(
                    b"% 6d % 7d %s %s %s\n"
                    % (
                        i,
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write(
                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.start(i),
                        r.length(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
            else:
                ui.write(
                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
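
# A minimal usage sketch (hypothetical file paths; column layout follows the
# header format strings above, actual values depend on the repository):
#
#   $ hg debugrevlogindex -c              # changelog index, format 0
#   $ hg debugrevlogindex -f 1 -m         # manifest index, extended format 1
#   $ hg debugrevlogindex path/to/file    # index of a single filelog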


@command(
    b'debugrevspec',
    [
        (
            b'',
            b'optimize',
            None,
            _(b'print parsed tree after optimizing (DEPRECATED)'),
        ),
        (
            b'',
            b'show-revs',
            True,
            _(b'print list of result revisions (default)'),
        ),
        (
            b's',
            b'show-set',
            None,
            _(b'print internal representation of result set'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
        (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
        (b'', b'verify-optimized', False, _(b'verify optimized result')),
    ],
    b'REVSPEC',
)
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use the -p/--show-stage option to print the parsed tree at the given
    stages. Use '-p all' to print the tree at every stage.

    Use the --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the
    unoptimized one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems(b'revsetalias')
    stages = [
        (b'parsed', lambda tree: tree),
        (
            b'expanded',
            lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
        ),
        (b'concatenated', revsetlang.foldconcat),
        (b'analyzed', revsetlang.analyze),
        (b'optimized', revsetlang.optimize),
    ]
    if opts[b'no_optimized']:
        stages = stages[:-1]
    if opts[b'verify_optimized'] and opts[b'no_optimized']:
        raise error.Abort(
            _(b'cannot use --verify-optimized with --no-optimized')
        )
    stagenames = set(n for n, f in stages)

    showalways = set()
    showchanged = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
        showchanged.update([b'expanded', b'concatenated'])
        if opts[b'optimize']:
            showalways.add(b'optimized')
    if opts[b'show_stage'] and opts[b'optimize']:
        raise error.Abort(_(b'cannot use --optimize with --show-stage'))
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(revsetlang.prettyformat(tree), b"\n")
            printedtree = tree

    if opts[b'verify_optimized']:
        arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
        brevs = revset.makematcher(treebystage[b'optimized'])(repo)
        if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
            ui.writenoi18n(
                b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
            )
            ui.writenoi18n(
                b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
            )
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
        ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write(b'-%d\n' % c, label=b'diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write(b'+%d\n' % c, label=b'diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(b' %d\n' % c)
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
        ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
    if not opts[b'show_revs']:
        return
    for c in revs:
        ui.write(b"%d\n" % c)
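
# A usage sketch (hypothetical revset expressions): dump the tree at every
# parser stage, then check that the optimizer preserves the computed set
# (the command returns 1 if the optimized result differs).
#
#   $ hg debugrevspec -p all 'heads(all())'
#   $ hg debugrevspec --verify-optimized 'draft() and not obsolete()'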


@command(
    b'debugserve',
    [
        (
            b'',
            b'sshstdio',
            False,
            _(b'run an SSH server bound to process handles'),
        ),
        (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
        (b'', b'logiofile', b'', _(b'file to log server I/O to')),
    ],
    b'',
)
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts[b'sshstdio']:
        raise error.Abort(_(b'only --sshstdio is currently supported'))

    logfh = None

    if opts[b'logiofd'] and opts[b'logiofile']:
        raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))

    if opts[b'logiofd']:
        # Ideally we would be line buffered. But line buffering in binary
        # mode isn't supported and emits a warning in Python 3.8+. Disabling
        # buffering could have performance impacts. But since this isn't
        # performance critical code, it should be fine.
        try:
            logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
    elif opts[b'logiofile']:
        logfh = open(opts[b'logiofile'], b'ab', 0)

    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()
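
# A usage sketch (assumed log path): serve the current repository over
# stdio much like ``hg serve --stdio``, while logging raw protocol I/O.
#
#   $ hg debugserve --sshstdio --logiofile /tmp/hg-server-io.log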


@command(b'debugsetparents', [], _(b'REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """

    node1 = scmutil.revsingle(repo, rev1).node()
    node2 = scmutil.revsingle(repo, rev2, b'null').node()

    with repo.wlock():
        repo.setparents(node1, node2)
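
# A usage sketch (hypothetical revisions): stage a merge of two heads
# without touching any files, then drop the second parent again.
#
#   $ hg debugsetparents 3 7
#   $ hg debugsetparents .    # rev2 defaults to 'null'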


@command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugsidedata(ui, repo, file_, rev=None, **opts):
    """dump the side data for a cl/manifest/file revision

    Use --verbose to dump the sidedata content."""
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    r = getattr(r, '_revlog', r)
    try:
        sidedata = r.sidedata(r.lookup(rev))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
    if sidedata:
        sidedata = list(sidedata.items())
        sidedata.sort()
        ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
        for key, value in sidedata:
            ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
            if ui.verbose:
                ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
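
# A usage sketch (hypothetical revisions): list sidedata entries for a
# changelog or manifest revision; add --verbose to dump each payload.
#
#   $ hg debugsidedata -c 0
#   $ hg debugsidedata --verbose -m 0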


@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    '''test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    '''
    if not pycompat.iswindows:
        raise error.Abort(
            _(b'certificate chain building is only possible on Windows')
        )

    if not source:
        if not repo:
            raise error.Abort(
                _(
                    b"there is no Mercurial repository here, and no "
                    b"server specified"
                )
            )
        source = b"default"

    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)

    defaultport = {b'https': 443, b'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_(b"malformed port number in URL"))
    else:
        raise error.Abort(_(b"only https and ssh connections are supported"))

    from . import win32

    s = ssl.wrap_socket(
        socket.socket(),
        ssl_version=ssl.PROTOCOL_TLS,
        cert_reqs=ssl.CERT_NONE,
        ca_certs=None,
    )

    try:
        s.connect(addr)
        cert = s.getpeercert(True)

        ui.status(_(b'checking the certificate chain for %s\n') % url.host)

        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_(b'certificate chain is incomplete, updating... '))

            if not win32.checkcertificatechain(cert):
                ui.status(_(b'failed.\n'))
            else:
                ui.status(_(b'done.\n'))
        else:
            ui.status(_(b'full certificate chain is available\n'))
    finally:
        s.close()
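
# A usage sketch (hypothetical URL; aborts on non-Windows platforms, per
# the check above):
#
#   $ hg debugssl https://hg.example.com/repo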


@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    ctx = scmutil.revsingle(repo, rev, None)
    for k, v in sorted(ctx.substate.items()):
        ui.writenoi18n(b'path %s\n' % k)
        ui.writenoi18n(b' source %s\n' % v[0])
        ui.writenoi18n(b' revision %s\n' % v[1])


@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only, unless the closest
    successors sets are requested.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors is called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    ctx2str = bytes
    node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % ctx2str(ctx))
        for succsset in obsutil.successorssets(
            repo, ctx.node(), closest=opts['closest'], cache=cache
        ):
            if succsset:
                ui.write(b' ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(b' ')
                    ui.write(node2str(node))
            ui.write(b'\n')
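
# A usage sketch (hypothetical revsets): one successors set is printed per
# line under each requested revision, per the format documented above.
#
#   $ hg debugsuccessorssets 'obsolete()'
#   $ hg debugsuccessorssets --closest .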


@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    for r in repo:
        node = repo[r].node()
        tagsnode = cache.getfnode(node, computemissing=False)
        # use bytes and %d so the interpolation also works on Python 3
        tagsnodedisplay = hex(tagsnode) if tagsnode else b'missing/invalid'
        ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))


@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = scmutil.revrange(repo, opts['rev'])

    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split(b'=', 1))
            if not k or k == b'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_(b'malformed keyword definition: %s') % d)

    if ui.verbose:
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
            )

    if revs is None:
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
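
# A usage sketch (hypothetical template and keyword): render a log template
# against the working directory parent, or inject an ad-hoc keyword via -D.
#
#   $ hg debugtemplate -r . '{node|short} {desc|firstline}\n'
#   $ hg debugtemplate -D greeting=hi '{greeting} from a generic template\n'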


@command(
    b'debuguigetpass',
    [(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    ui.writenoi18n(b'response: %s\n' % r)


@command(
    b'debuguiprompt',
    [(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    r = ui.prompt(prompt)
    ui.writenoi18n(b'response: %s\n' % r)


@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    with repo.wlock(), repo.lock():
        repo.updatecaches(full=True)


@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines,
    this should complete almost instantaneously and the chances of a consumer
    being unable to access the repository should be low.

    By default, all revlogs will be upgraded. You can restrict this using
    flags such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlogs but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    """
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=optimize, backup=backup, **opts
    )
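
# A usage sketch: preview the upgrade plan first, then run it restricted to
# filelogs only (flag combination taken from the docstring above).
#
#   $ hg debugupgraderepo
#   $ hg debugupgraderepo --run --no-changelog --no-manifest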


@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    f = lambda fn: fn
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        f = lambda fn: util.normpath(fn)
    fmt = b'f %%-%ds %%-%ds %%s' % (
        max([len(abs) for abs in items]),
        max([len(repo.pathto(abs)) for abs in items]),
    )
    for abs in items:
        line = fmt % (
            abs,
            f(repo.pathto(abs)),
            m.exact(abs) and b'exact' or b'',
        )
        ui.write(b"%s\n" % line.rstrip())
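
# A usage sketch (hypothetical patterns): one 'f' line is printed per
# matched file, flagged 'exact' when the pattern named it directly.
#
#   $ hg debugwalk 'glob:**.py'
#   $ hg debugwalk -I '*.c' .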


@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        dnodes = b''
        if entry.get(b'divergentnodes'):
            dnodes = (
                b' '.join(
                    b'%s (%s)' % (ctx.hex(), ctx.phasestr())
                    for ctx in entry[b'divergentnodes']
                )
                + b' '
            )
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )


@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    for opt in cmdutil.remoteopts:
        del opts[opt[1]]
    args = {}
    for k, v in pycompat.iteritems(opts):
        if v:
            args[k] = v
    args = pycompat.strkwargs(args)
    # run twice to check that we don't mess up the stream for the next command
    res1 = repo.debugwireargs(*vals, **args)
    res2 = repo.debugwireargs(*vals, **args)
    ui.write(b"%s\n" % res1)
    if res1 != res2:
        ui.warn(b"%s\n" % res2)


def _parsewirelangblocks(fh):
    activeaction = None
    blocklines = []
    lastindent = 0

    for line in fh:
        line = line.rstrip()
        if not line:
            continue

        if line.startswith(b'#'):
            continue

        if not line.startswith(b' '):
            # New block. Flush previous one.
            if activeaction:
                yield activeaction, blocklines

            activeaction = line
            blocklines = []
            lastindent = 0
            continue

        # Else we start with an indent.

        if not activeaction:
            raise error.Abort(_(b'indented line outside of block'))

        indent = len(line) - len(line.lstrip())

        # If this line is indented more than the last line, concatenate it.
        if indent > lastindent and blocklines:
            blocklines[-1] += line.lstrip()
        else:
            blocklines.append(line)
            lastindent = indent

    # Flush last block.
    if activeaction:
        yield activeaction, blocklines
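
# A minimal sketch of the parsing semantics above (illustrative, not part
# of the test suite): a no-leading-space line opens a block, and a line
# indented deeper than the previous one is folded into the prior argument.
#
#   >>> import io
#   >>> list(_parsewirelangblocks(io.BytesIO(
#   ...     b'command listkeys\n'
#   ...     b'    namespace bookmarks\n')))
#   [(b'command listkeys', [b'    namespace bookmarks'])]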


@command(
    b'debugwireproto',
    [
        (b'', b'localssh', False, _(b'start an SSH server for this repo')),
        (b'', b'peer', b'', _(b'construct a specific version of the peer')),
        (
            b'',
            b'noreadstderr',
            False,
            _(b'do not read from stderr of the remote'),
        ),
        (
            b'',
            b'nologhandshake',
            False,
            _(b'do not log I/O related to the peer handshake'),
        ),
    ]
    + cmdutil.remoteopts,
    _(b'[PATH]'),
    optionalrepo=True,
)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
    raw data payloads and don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is specified via stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.

    raw+
    ----

    Behaves like ``raw`` except flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

        command listkeys
            namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` N bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` N bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit a *Unified Frame-Based Protocol* by using special
    syntax.

    A frame is composed as a type, flags, and payload. These can be parsed
    from a string of the form:

        <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
3949
3950
3950 ``request-id`` and ``stream-id`` are integers defining the request and
3951 ``request-id`` and ``stream-id`` are integers defining the request and
3951 stream identifiers.
3952 stream identifiers.
3952
3953
3953 ``type`` can be an integer value for the frame type or the string name
3954 ``type`` can be an integer value for the frame type or the string name
3954 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
3955 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
3955 ``command-name``.
3956 ``command-name``.
3956
3957
3957 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
3958 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
3958 components. Each component (and there can be just one) can be an integer
3959 components. Each component (and there can be just one) can be an integer
3959 or a flag name for stream flags or frame flags, respectively. Values are
3960 or a flag name for stream flags or frame flags, respectively. Values are
3960 resolved to integers and then bitwise OR'd together.
3961 resolved to integers and then bitwise OR'd together.
3961
3962
3962 ``payload`` represents the raw frame payload. If it begins with
3963 ``payload`` represents the raw frame payload. If it begins with
3963 ``cbor:``, the following string is evaluated as Python code and the
3964 ``cbor:``, the following string is evaluated as Python code and the
3964 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
3965 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
3965 as a Python byte string literal.
3966 as a Python byte string literal.
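
    Putting it together, a small hypothetical input script exercising
    several of the actions above could look like this (``listkeys`` and
    ``heads`` are real wire protocol commands; the values shown are only
    examples)::

        command listkeys
            namespace namespaces

        batchbegin
        command heads
        command listkeys
            namespace bookmarks
        batchsubmit

        close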
3966 """
3967 """
    opts = pycompat.byteskwargs(opts)

    if opts[b'localssh'] and not repo:
        raise error.Abort(_(b'--localssh requires a repository'))

    if opts[b'peer'] and opts[b'peer'] not in (
        b'raw',
        b'http2',
        b'ssh1',
        b'ssh2',
    ):
        raise error.Abort(
            _(b'invalid value for --peer'),
            hint=_(b'valid values are "raw", "http2", "ssh1", and "ssh2"'),
        )

    if path and opts[b'localssh']:
        raise error.Abort(_(b'cannot specify --localssh with an explicit path'))

    if ui.interactive():
        ui.write(_(b'(waiting for commands on stdin)\n'))

    blocks = list(_parsewirelangblocks(ui.fin))

    proc = None
    stdin = None
    stdout = None
    stderr = None
    opener = None

    if opts[b'localssh']:
        # We start the SSH server in its own process so there is process
        # separation. This prevents a whole class of potential bugs around
        # shared state from interfering with server operation.
        args = procutil.hgcmd() + [
            b'-R',
            repo.root,
            b'debugserve',
            b'--sshstdio',
        ]
        proc = subprocess.Popen(
            pycompat.rapply(procutil.tonativestr, args),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            bufsize=0,
        )

        stdin = proc.stdin
        stdout = proc.stdout
        stderr = proc.stderr

        # We turn the pipes into observers so we can log I/O.
        if ui.verbose or opts[b'peer'] == b'raw':
            stdin = util.makeloggingfileobject(
                ui, proc.stdin, b'i', logdata=True
            )
            stdout = util.makeloggingfileobject(
                ui, proc.stdout, b'o', logdata=True
            )
            stderr = util.makeloggingfileobject(
                ui, proc.stderr, b'e', logdata=True
            )

        # --localssh also implies the peer connection settings.

        url = b'ssh://localserver'
        autoreadstderr = not opts[b'noreadstderr']

        if opts[b'peer'] == b'ssh1':
            ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
            peer = sshpeer.sshv1peer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                None,
                autoreadstderr=autoreadstderr,
            )
        elif opts[b'peer'] == b'ssh2':
            ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
            peer = sshpeer.sshv2peer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                None,
                autoreadstderr=autoreadstderr,
            )
        elif opts[b'peer'] == b'raw':
            ui.write(_(b'using raw connection to peer\n'))
            peer = None
        else:
            ui.write(_(b'creating ssh peer from handshake results\n'))
            peer = sshpeer.makepeer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                autoreadstderr=autoreadstderr,
            )

    elif path:
        # We bypass hg.peer() so we can proxy the sockets.
        # TODO consider not doing this because we skip
        # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
        u = util.url(path)
        if u.scheme != b'http':
            raise error.Abort(_(b'only http:// paths are currently supported'))

        url, authinfo = u.authinfo()
        openerargs = {
            'useragent': b'Mercurial debugwireproto',
        }

        # Turn pipes/sockets into observers so we can log I/O.
        if ui.verbose:
            openerargs.update(
                {
                    'loggingfh': ui,
                    'loggingname': b's',
                    'loggingopts': {'logdata': True, 'logdataapis': False,},
                }
            )

        if ui.debugflag:
            openerargs['loggingopts']['logdataapis'] = True

        # Don't send default headers when in raw mode. This allows us to
        # bypass most of the behavior of our URL handling code so we can
        # have near complete control over what's sent on the wire.
        if opts[b'peer'] == b'raw':
            openerargs['sendaccept'] = False

        opener = urlmod.opener(ui, authinfo, **openerargs)

        if opts[b'peer'] == b'http2':
            ui.write(_(b'creating http peer for wire protocol version 2\n'))
            # We go through makepeer() because we need an API descriptor for
            # the peer instance to be useful.
            with ui.configoverride(
                {(b'experimental', b'httppeer.advertise-v2'): True}
            ):
                if opts[b'nologhandshake']:
                    ui.pushbuffer()

                peer = httppeer.makepeer(ui, path, opener=opener)

                if opts[b'nologhandshake']:
                    ui.popbuffer()

            if not isinstance(peer, httppeer.httpv2peer):
                raise error.Abort(
                    _(
                        b'could not instantiate HTTP peer for '
                        b'wire protocol version 2'
                    ),
                    hint=_(
                        b'the server may not have the feature '
                        b'enabled or is not allowing this '
                        b'client version'
                    ),
                )

        elif opts[b'peer'] == b'raw':
            ui.write(_(b'using raw connection to peer\n'))
            peer = None
        elif opts[b'peer']:
            raise error.Abort(
                _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
            )
        else:
            peer = httppeer.makepeer(ui, path, opener=opener)

        # We /could/ populate stdin/stdout with sock.makefile()...
    else:
        raise error.Abort(_(b'unsupported connection configuration'))

    batchedcommands = None

    # Now perform actions based on the parsed wire language instructions.
    for action, lines in blocks:
        if action in (b'raw', b'raw+'):
            if not stdin:
                raise error.Abort(_(b'cannot call raw/raw+ on this peer'))

            # Concatenate the data together.
            data = b''.join(l.lstrip() for l in lines)
            data = stringutil.unescapestr(data)
            stdin.write(data)

            if action == b'raw+':
                stdin.flush()
        elif action == b'flush':
            if not stdin:
                raise error.Abort(_(b'cannot call flush on this peer'))
            stdin.flush()
        elif action.startswith(b'command'):
            if not peer:
                raise error.Abort(
                    _(
                        b'cannot send commands unless peer instance '
                        b'is available'
                    )
                )

            command = action.split(b' ', 1)[1]

            args = {}
            for line in lines:
                # We need to allow empty values.
                fields = line.lstrip().split(b' ', 1)
                if len(fields) == 1:
                    key = fields[0]
                    value = b''
                else:
                    key, value = fields

                if value.startswith(b'eval:'):
                    value = stringutil.evalpythonliteral(value[5:])
                else:
                    value = stringutil.unescapestr(value)

                args[key] = value

            if batchedcommands is not None:
                batchedcommands.append((command, args))
                continue

            ui.status(_(b'sending %s command\n') % command)

            if b'PUSHFILE' in args:
                with open(args[b'PUSHFILE'], 'rb') as fh:
                    del args[b'PUSHFILE']
                    res, output = peer._callpush(
                        command, fh, **pycompat.strkwargs(args)
                    )
                ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
                ui.status(
                    _(b'remote output: %s\n') % stringutil.escapestr(output)
                )
            else:
                with peer.commandexecutor() as e:
                    res = e.callcommand(command, args).result()

                if isinstance(res, wireprotov2peer.commandresponse):
                    val = res.objects()
                    ui.status(
                        _(b'response: %s\n')
                        % stringutil.pprint(val, bprefix=True, indent=2)
                    )
                else:
                    ui.status(
                        _(b'response: %s\n')
                        % stringutil.pprint(res, bprefix=True, indent=2)
                    )

        elif action == b'batchbegin':
            if batchedcommands is not None:
                raise error.Abort(_(b'nested batchbegin not allowed'))

            batchedcommands = []
        elif action == b'batchsubmit':
            # There is a batching API we could go through. But it would be
            # difficult to normalize requests into function calls. It is easier
            # to bypass this layer and normalize to commands + args.
            ui.status(
                _(b'sending batch with %d sub-commands\n')
                % len(batchedcommands)
            )
            assert peer is not None
            for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                ui.status(
                    _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
                )

            batchedcommands = None

        elif action.startswith(b'httprequest '):
            if not opener:
                raise error.Abort(
                    _(b'cannot use httprequest without an HTTP peer')
                )

            request = action.split(b' ', 2)
            if len(request) != 3:
                raise error.Abort(
                    _(
                        b'invalid httprequest: expected format is '
                        b'"httprequest <method> <path>"'
                    )
                )

            method, httppath = request[1:]
            headers = {}
            body = None
            frames = []
            for line in lines:
                line = line.lstrip()
                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                if m:
                    # Headers need to use native strings.
                    key = pycompat.strurl(m.group(1))
                    value = pycompat.strurl(m.group(2))
                    headers[key] = value
                    continue

                if line.startswith(b'BODYFILE '):
                    with open(line.split(b' ', 1)[1], b'rb') as fh:
                        body = fh.read()
                elif line.startswith(b'frame '):
                    frame = wireprotoframing.makeframefromhumanstring(
                        line[len(b'frame ') :]
                    )

                    frames.append(frame)
                else:
                    raise error.Abort(
                        _(b'unknown argument to httprequest: %s') % line
                    )

            url = path + httppath

            if frames:
                body = b''.join(bytes(f) for f in frames)

            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)

            # urllib.Request insists on using has_data() as a proxy for
            # determining the request method. Override that to use our
            # explicitly requested method.
            req.get_method = lambda: pycompat.sysstr(method)

            try:
                res = opener.open(req)
                body = res.read()
            except util.urlerr.urlerror as e:
                # read() method must be called, but only exists in Python 2
                getattr(e, 'read', lambda: None)()
                continue

            ct = res.headers.get('Content-Type')
            if ct == 'application/mercurial-cbor':
                ui.write(
                    _(b'cbor> %s\n')
                    % stringutil.pprint(
                        cborutil.decodeall(body), bprefix=True, indent=2
                    )
                )

        elif action == b'close':
            assert peer is not None
            peer.close()
        elif action == b'readavailable':
            if not stdout or not stderr:
                raise error.Abort(
                    _(b'readavailable not available on this peer')
                )

            stdin.close()
            stdout.read()
            stderr.read()

        elif action == b'readline':
            if not stdout:
                raise error.Abort(_(b'readline not available on this peer'))
            stdout.readline()
        elif action == b'ereadline':
            if not stderr:
                raise error.Abort(_(b'ereadline not available on this peer'))
            stderr.readline()
        elif action.startswith(b'read '):
            count = int(action.split(b' ', 1)[1])
            if not stdout:
                raise error.Abort(_(b'read not available on this peer'))
            stdout.read(count)
        elif action.startswith(b'eread '):
            count = int(action.split(b' ', 1)[1])
            if not stderr:
                raise error.Abort(_(b'eread not available on this peer'))
            stderr.read(count)
        else:
            raise error.Abort(_(b'unknown action: %s') % action)

    if batchedcommands is not None:
        raise error.Abort(_(b'unclosed "batchbegin" request'))

    if peer:
        peer.close()

    if proc:
        proc.kill()
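
For reference, ``hg debugwireproto`` is normally driven by piping one of
these scripts to stdin, test-suite style. A minimal, hypothetical transcript
(output abbreviated; the status lines correspond to the ``ui.write`` and
``ui.status`` calls above)::

  $ hg debugwireproto --localssh --peer ssh1 << EOF
  > command listkeys
  >     namespace namespaces
  > EOF
  creating ssh peer for wire protocol version 1
  sending listkeys command
  response: {
    ...
  }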
@@ -1,284 +1,284 @@
# parsers.py - Python implementation of parsers.c
#
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct
import zlib

from ..node import nullid, nullrev
from .. import (
    pycompat,
    util,
)

from ..revlogutils import nodemap as nodemaputil

stringio = pycompat.bytesio


_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress

# Some code below makes tuples directly because it's more convenient. However,
# code outside this module should always use dirstatetuple.
def dirstatetuple(*x):
    # x is a tuple
    return x


indexformatng = b">Qiiiiii20s12x"
indexfirst = struct.calcsize(b'Q')
sizeint = struct.calcsize(b'i')
indexsize = struct.calcsize(indexformatng)


def gettype(q):
    return int(q & 0xFFFF)


def offset_type(offset, type):
    return int(int(offset) << 16 | type)

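
# Illustrative sketch, not part of the original module: a worked example of
# the bit layout packed by the two helpers above. The byte offset occupies
# the high 48 bits of the first index field; the flag bits the low 16.
def _offset_type_example():
    """Worked example with arbitrary values."""
    packed = offset_type(1024, 0x0001)
    assert packed == (1024 << 16) | 0x0001
    assert gettype(packed) == 0x0001  # low 16 bits: flags
    assert packed >> 16 == 1024       # high 48 bits: byte offset
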
class BaseIndexObject(object):
    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        nodemap = nodemaputil.NodeMap({nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exists in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        self._extra.append(tup)

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return (0, 0, 0, -1, -1, -1, -1, nullid)
        self._check_index(i)
        if i >= self._lgt:
            return self._extra[i - self._lgt]
        index = self._calculate_index(i)
        r = struct.unpack(indexformatng, self._data[index : index + indexsize])
        if i == 0:
            e = list(r)
            type = gettype(e[0])
            e[0] = offset_type(0, type)
            return tuple(e)
        return r


class IndexObject(BaseIndexObject):
    def __init__(self, data):
        assert len(data) % indexsize == 0
        self._data = data
        self._lgt = len(data) // indexsize
        self._extra = []

    def _calculate_index(self, i):
        return i * indexsize

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._data = self._data[: i * indexsize]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

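
# Illustrative sketch, not part of the original module: exercising the
# lookup API defined above. The single entry is hand-built with ``struct``
# and the node value is made up.
def _index_lookup_example():
    """Build a one-entry index blob and query it."""
    node = b'\x11' * 20
    entry = struct.pack(
        indexformatng, offset_type(0, 0), 10, 12, 0, 0, -1, -1, node
    )
    idx = IndexObject(entry)
    assert len(idx) == 1
    assert idx.has_node(node)
    assert idx.rev(node) == 0                  # an unknown node would raise
    assert idx.get_rev(b'\x22' * 20) is None   # an unknown node returns None
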
class PersistentNodeMapIndexObject(IndexObject):
    """a debug-oriented class to test the persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to the persistent
        nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_rev
        )
        self._nm_root = self._nm_max_idx = self._nm_rev = None
        return data

-    def update_nodemap_data(self, nm_data):
+    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
-                self._nm_rev = len(self) - 1
+                self._nm_rev = docket.tip_rev
            else:
                self._nm_root = self._nm_max_idx = self._nm_rev = None

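
# Hypothetical driver for the class above, not part of this patch. ``docket``
# is assumed to be the on-disk nodemap docket, which this change relies on to
# expose ``tip_rev``: the largest revision covered by the persisted data. The
# other arguments are made-up blobs.
def _persistent_nodemap_example(docket, index_data, nodemap_blob, new_entry):
    """Sketch of the intended call sequence for incremental updates."""
    idx = PersistentNodeMapIndexObject(index_data)
    idx.update_nodemap_data(docket, nodemap_blob)  # prime from disk

    idx.append(new_entry)  # extend the index by one revision

    delta = idx.nodemap_data_incremental()
    # ``delta`` is append-only bytes covering revisions after
    # ``docket.tip_rev``, or None if no persisted state was loaded.
    return delta
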
class InlinedIndexObject(BaseIndexObject):
    def __init__(self, data, inline=0):
        self._data = data
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - indexsize:
            (s,) = struct.unpack(
                b'>i', self._data[off + indexfirst : off + sizeint + indexfirst]
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += indexsize + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        return self._offsets[i]


def parse_index2(data, inline):
    if not inline:
        return IndexObject(data), None
    return InlinedIndexObject(data, inline), (0, data)


def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always returns a PersistentNodeMapIndexObject
    """
    return PersistentNodeMapIndexObject(data), None

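
# Illustrative sketch, not part of the original module: the return shapes of
# the two entry points above. ``data`` is a hypothetical index blob, e.g. the
# 64-byte entry built in the earlier lookup sketch.
def _parse_index_example(data):
    """Show the (index, cache) contract of the parsers."""
    # Non-inline revlogs carry no chunk cache; inline ones return (0, data)
    # so callers can reuse the combined index+data buffer.
    index, cache = parse_index2(data, inline=False)
    assert cache is None and isinstance(index, IndexObject)

    nm_index, nm_cache = parse_index_devel_nodemap(data, inline=False)
    assert isinstance(nm_index, PersistentNodeMapIndexObject)
    assert nm_cache is None
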
def parse_dirstate(dmap, copymap, st):
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = e[:4]
    return parents


def pack_dirstate(dmap, copymap, pl, now):
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e[0] == b'n' and e[3] == now:
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e = dirstatetuple(e[0], e[1], e[2], -1)
            dmap[f] = e

        if f in copymap:
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(b">cllll", e[0], e[1], e[2], e[3], len(f))
        write(e)
        write(f)
    return cs.getvalue()
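
# Illustrative sketch, not part of the original module: the two functions
# above are inverses. Made-up values; an mtime of 0 avoids the
# "modified now" invalidation branch in pack_dirstate.
def _dirstate_roundtrip_example():
    """Pack a tiny dirstate and parse it back."""
    dmap = {b'a.txt': dirstatetuple(b'n', 0o644, 5, 0)}
    copymap = {}
    parents = [b'\x00' * 20, b'\x00' * 20]

    blob = pack_dirstate(dmap, copymap, parents, now=42)

    dmap2, copymap2 = {}, {}
    assert parse_dirstate(dmap2, copymap2, blob) == parents
    assert dmap2 == dmap and copymap2 == copymap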
@@ -1,3033 +1,3033 @@
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

from __future__ import absolute_import

import collections
import contextlib
import errno
import io
import os
import struct
import zlib

# import stuff from node for others to import from revlog
from .node import (
    bin,
    hex,
    nullhex,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirhex,
    wdirid,
    wdirrev,
)
from .i18n import _
from .pycompat import getattr
from .revlogutils.constants import (
    FLAG_GENERALDELTA,
    FLAG_INLINE_DATA,
    REVLOGV0,
    REVLOGV1,
    REVLOGV1_FLAGS,
    REVLOGV2,
    REVLOGV2_FLAGS,
    REVLOG_DEFAULT_FLAGS,
    REVLOG_DEFAULT_FORMAT,
    REVLOG_DEFAULT_VERSION,
)
from .revlogutils.flagutil import (
    REVIDX_DEFAULT_FLAGS,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
    REVIDX_FLAGS_ORDER,
    REVIDX_ISCENSORED,
    REVIDX_RAWTEXT_CHANGING_FLAGS,
    REVIDX_SIDEDATA,
)
from .thirdparty import attr
from . import (
    ancestor,
    dagop,
    error,
    mdiff,
    policy,
    pycompat,
    templatefilters,
    util,
)
from .interfaces import (
    repository,
    util as interfaceutil,
)
from .revlogutils import (
    deltas as deltautil,
    flagutil,
    nodemap as nodemaputil,
    sidedata as sidedatautil,
)
from .utils import (
    storageutil,
    stringutil,
)

# blanked usage of all the names to prevent pyflakes constraints
# We need these names available in the module for extensions.
REVLOGV0
REVLOGV1
REVLOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_SIDEDATA
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS

parsers = policy.importmod('parsers')
rustancestor = policy.importrust('ancestor')
rustdagop = policy.importrust('dagop')
rustrevlog = policy.importrust('revlog')

# Aliased for performance.
_zlibdecompress = zlib.decompress

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    return text, False, {}


def ellipsiswriteprocessor(rl, text, sidedata):
    return text, False


def ellipsisrawprocessor(rl, text):
    return False


ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)


def getoffset(q):
    return int(q >> 16)


def gettype(q):
    return int(q & 0xFFFF)


def offset_type(offset, type):
    if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
        raise ValueError(b'unknown revlog index flags')
    return int(int(offset) << 16 | type)

def _verify_revision(rl, skipflags, state, node):
    """Verify the integrity of the given revlog ``node`` while providing a hook
    point for extensions to influence the operation."""
    if skipflags:
        state[b'skipread'].add(node)
    else:
        # Side-effect: read content and verify hash.
        rl.revision(node)


@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Information about a revision that allows building its fulltext
    node: expected hash of the revision
    p1, p2: parent revs of the revision
    btext: built text cache consisting of a one-element list
    cachedelta: (baserev, uncompressed_delta) or None
    flags: flags associated with the revision storage

    One of btext[0] or cachedelta must be set.
    """

    node = attr.ib()
    p1 = attr.ib()
    p2 = attr.ib()
    btext = attr.ib()
    textlen = attr.ib()
    cachedelta = attr.ib()
    flags = attr.ib()


@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)


@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)

# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = struct.Struct(b">4l20s20s20s")
indexformatv0_pack = indexformatv0.pack
indexformatv0_unpack = indexformatv0.unpack


class revlogoldindex(list):
    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        nodemap = nodemaputil.NodeMap({nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exists in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def append(self, tup):
        self._nodemap[tup[7]] = len(self)
        super(revlogoldindex, self).append(tup)

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        for r in pycompat.xrange(i.start, len(self)):
            del self._nodemap[self[r][7]]
        super(revlogoldindex, self).__delitem__(i)

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __getitem__(self, i):
        if i == -1:
            return (0, 0, 0, -1, -1, -1, -1, nullid)
        return list.__getitem__(self, i)


class revlogoldio(object):
    def __init__(self):
        self.size = indexformatv0.size

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = nodemaputil.NodeMap({nullid: nullrev})
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off : off + s]
            off += s
            e = indexformatv0_unpack(cur)
            # transform to revlogv1 format
            e2 = (
                offset_type(e[0], 0),
                e[1],
                -1,
                e[2],
                e[3],
                nodemap.get(e[4], nullrev),
                nodemap.get(e[5], nullrev),
                e[6],
            )
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        index = revlogoldindex(index)
        return index, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise error.RevlogError(
                _(b'index entry flags need revlog version 1')
            )
        e2 = (
            getoffset(entry[0]),
            entry[1],
            entry[3],
            entry[4],
            node(entry[5]),
            node(entry[6]),
            entry[7],
        )
        return indexformatv0_pack(*e2)

318 # index ng:
318 # index ng:
319 # 6 bytes: offset
319 # 6 bytes: offset
320 # 2 bytes: flags
320 # 2 bytes: flags
321 # 4 bytes: compressed length
321 # 4 bytes: compressed length
322 # 4 bytes: uncompressed length
322 # 4 bytes: uncompressed length
323 # 4 bytes: base rev
323 # 4 bytes: base rev
324 # 4 bytes: link rev
324 # 4 bytes: link rev
325 # 4 bytes: parent 1 rev
325 # 4 bytes: parent 1 rev
326 # 4 bytes: parent 2 rev
326 # 4 bytes: parent 2 rev
327 # 32 bytes: nodeid
327 # 32 bytes: nodeid
328 indexformatng = struct.Struct(b">Qiiiiii20s12x")
328 indexformatng = struct.Struct(b">Qiiiiii20s12x")
329 indexformatng_pack = indexformatng.pack
329 indexformatng_pack = indexformatng.pack
330 versionformat = struct.Struct(b">I")
330 versionformat = struct.Struct(b">I")
331 versionformat_pack = versionformat.pack
331 versionformat_pack = versionformat.pack
332 versionformat_unpack = versionformat.unpack
332 versionformat_unpack = versionformat.unpack
333
333
334 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
334 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
335 # signed integer)
335 # signed integer)
336 _maxentrysize = 0x7FFFFFFF
336 _maxentrysize = 0x7FFFFFFF
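

# Illustrative sketch (added for exposition, not part of the original
# module): the leading Q of an "ng" entry packs the data offset into the
# high 48 bits and the flags into the low 16 bits (this is what
# ``offset_type()`` computes), and ``revlogio.packentry`` below overwrites
# the first 4 bytes of rev 0's entry with the version header.
# ``_example_ng_entry`` is a hypothetical name used only for demonstration.
def _example_ng_entry():
    offset_flags = (1024 << 16) | 0  # offset 1024, no flags
    entry = indexformatng_pack(
        offset_flags,  # offset and flags
        11,  # compressed length
        24,  # uncompressed length
        0,  # base rev
        0,  # link rev
        -1,  # parent 1 rev
        -1,  # parent 2 rev
        b'\x11' * 20,  # nodeid (the 12x pads the field to 32 bytes)
    )
    assert len(entry) == indexformatng.size == 64
    # rev 0 smuggles the version header into its first 4 bytes:
    header = versionformat_pack(REVLOGV1 | FLAG_INLINE_DATA) + entry[4:]
    assert versionformat_unpack(header[:4])[0] == REVLOGV1 | FLAG_INLINE_DATA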


class revlogio(object):
    def __init__(self):
        self.size = indexformatng.size

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, cache

    def packentry(self, entry, node, version, rev):
        p = indexformatng_pack(*entry)
        if rev == 0:
            p = versionformat_pack(version) + p[4:]
        return p


NodemapRevlogIO = None

if util.safehasattr(parsers, 'parse_index_devel_nodemap'):

    class NodemapRevlogIO(revlogio):
        """A debug oriented IO class that returns a PersistentNodeMapIndexObject

        The PersistentNodeMapIndexObject object is meant to test the
        persistent nodemap feature.
        """

        def parseindex(self, data, inline):
            index, cache = parsers.parse_index_devel_nodemap(data, inline)
            return index, cache


class rustrevlogio(revlogio):
    def parseindex(self, data, inline):
        index, cache = super(rustrevlogio, self).parseindex(data, inline)
        return rustrevlog.MixedIndex(index), cache


class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.

    If censorable is True, the revlog can have censored revisions.

    If `upperboundcomp` is not None, this is the expected maximal gain from
    compression for the data content.
    """

    _flagserrorclass = error.RevlogError

    def __init__(
        self,
        opener,
        indexfile,
        datafile=None,
        checkambig=False,
        mmaplargeindex=False,
        censorable=False,
        upperboundcomp=None,
        persistentnodemap=False,
    ):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.

        """
        self.upperboundcomp = upperboundcomp
        self.indexfile = indexfile
        self.datafile = datafile or (indexfile[:-2] + b".d")
        self.nodemap_file = None
        if persistentnodemap:
            self.nodemap_file = indexfile[:-2] + b".n"

        self.opener = opener
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        self._mmaplargeindex = mmaplargeindex
        self._censorable = censorable
        # 3-tuple of (node, rev, text) for a raw revision.
        self._revisioncache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, b'')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._deltabothparents = True
        self.index = None
        self._nodemap_docket = None
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        self._compengine = b'zlib'
        self._compengineopts = {}
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._sparserevlog = False
        self._srdensitythreshold = 0.50
        self._srmingapsize = 262144

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(flagutil.flagprocessors)

        # 2-tuple of file handles being used for active writing.
        self._writinghandles = None

        self._loadindex()

    def _loadindex(self):
        mmapindexthreshold = None
        opts = self.opener.options

        if b'revlogv2' in opts:
            newversionflags = REVLOGV2 | FLAG_INLINE_DATA
        elif b'revlogv1' in opts:
            newversionflags = REVLOGV1 | FLAG_INLINE_DATA
            if b'generaldelta' in opts:
                newversionflags |= FLAG_GENERALDELTA
        elif b'revlogv0' in self.opener.options:
            newversionflags = REVLOGV0
        else:
            newversionflags = REVLOG_DEFAULT_VERSION

        if b'chunkcachesize' in opts:
            self._chunkcachesize = opts[b'chunkcachesize']
        if b'maxchainlen' in opts:
            self._maxchainlen = opts[b'maxchainlen']
        if b'deltabothparents' in opts:
            self._deltabothparents = opts[b'deltabothparents']
        self._lazydelta = bool(opts.get(b'lazydelta', True))
        self._lazydeltabase = False
        if self._lazydelta:
            self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
        if b'compengine' in opts:
            self._compengine = opts[b'compengine']
        if b'zlib.level' in opts:
            self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
        if b'zstd.level' in opts:
            self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
        if b'maxdeltachainspan' in opts:
            self._maxdeltachainspan = opts[b'maxdeltachainspan']
        if self._mmaplargeindex and b'mmapindexthreshold' in opts:
            mmapindexthreshold = opts[b'mmapindexthreshold']
        self.hassidedata = bool(opts.get(b'side-data', False))
        if self.hassidedata:
            self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
        self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
        withsparseread = bool(opts.get(b'with-sparse-read', False))
        # sparse-revlog forces sparse-read
        self._withsparseread = self._sparserevlog or withsparseread
        if b'sparse-read-density-threshold' in opts:
            self._srdensitythreshold = opts[b'sparse-read-density-threshold']
        if b'sparse-read-min-gap-size' in opts:
            self._srmingapsize = opts[b'sparse-read-min-gap-size']
        if opts.get(b'enableellipsis'):
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in pycompat.iteritems(
            opts.get(b'flagprocessors', {})
        ):
            flagutil.insertflagprocessor(flag, processor, self._flagprocessors)

        if self._chunkcachesize <= 0:
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not greater than 0')
                % self._chunkcachesize
            )
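        # Added note: a positive power of two has exactly one bit set, so
        # ``x & (x - 1)`` clears it and yields zero; any other value leaves
        # stray bits behind, which the check below rejects.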
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not a power of 2')
                % self._chunkcachesize
            )

        indexdata = b''
        self._initempty = True
        try:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                self._nodemap_docket = nodemap_data[0]
            with self._indexfp() as f:
                if (
                    mmapindexthreshold is not None
                    and self.opener.fstat(f).st_size >= mmapindexthreshold
                ):
                    # TODO: should .close() to release resources without
                    # relying on Python GC
                    indexdata = util.buffer(util.mmapread(f))
                else:
                    indexdata = f.read()
            if len(indexdata) > 0:
                versionflags = versionformat_unpack(indexdata[:4])[0]
                self._initempty = False
            else:
                versionflags = newversionflags
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

            versionflags = newversionflags

        self.version = versionflags

        flags = versionflags & ~0xFFFF
        fmt = versionflags & 0xFFFF
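        # Added note: for example, an inline v1 index starts with the 4-byte
        # big-endian value REVLOGV1 | FLAG_INLINE_DATA == 0x00010001, which
        # the two masks above split into flags == 0x00010000 and fmt == 1.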

        if fmt == REVLOGV0:
            if flags:
                raise error.RevlogError(
                    _(b'unknown flags (%#04x) in version %d revlog %s')
                    % (flags >> 16, fmt, self.indexfile)
                )

            self._inline = False
            self._generaldelta = False

        elif fmt == REVLOGV1:
            if flags & ~REVLOGV1_FLAGS:
                raise error.RevlogError(
                    _(b'unknown flags (%#04x) in version %d revlog %s')
                    % (flags >> 16, fmt, self.indexfile)
                )

            self._inline = versionflags & FLAG_INLINE_DATA
            self._generaldelta = versionflags & FLAG_GENERALDELTA

        elif fmt == REVLOGV2:
            if flags & ~REVLOGV2_FLAGS:
                raise error.RevlogError(
                    _(b'unknown flags (%#04x) in version %d revlog %s')
                    % (flags >> 16, fmt, self.indexfile)
                )

            self._inline = versionflags & FLAG_INLINE_DATA
            # generaldelta implied by version 2 revlogs.
            self._generaldelta = True

        else:
            raise error.RevlogError(
                _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
            )
        # sparse-revlog can't be on without general-delta (issue6056)
        if not self._generaldelta:
            self._sparserevlog = False

        self._storedeltachains = True

        devel_nodemap = (
            self.nodemap_file
            and opts.get(b'devel-force-nodemap', False)
            and NodemapRevlogIO is not None
        )

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        elif devel_nodemap:
            self._io = NodemapRevlogIO()
        elif rustrevlog is not None and self.opener.options.get(b'rust.index'):
            self._io = rustrevlogio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
            index, _chunkcache = d
            use_nodemap = (
                not self._inline
                and self.nodemap_file is not None
                and util.safehasattr(index, 'update_nodemap_data')
            )
            if use_nodemap:
                nodemap_data = nodemaputil.persisted_data(self)
                if nodemap_data is not None:
                    index.update_nodemap_data(*nodemap_data)
        except (ValueError, IndexError):
            raise error.RevlogError(
                _(b"index %s is corrupted") % self.indexfile
            )
        self.index, self._chunkcache = d
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}
        # revlog header -> revlog compressor
        self._decompressors = {}

    @util.propertycache
    def _compressor(self):
        engine = util.compengines[self._compengine]
        return engine.revlogcompressor(self._compengineopts)

    def _indexfp(self, mode=b'r'):
        """file object for the revlog's index file"""
        args = {'mode': mode}
        if mode != b'r':
            args['checkambig'] = self._checkambig
        if mode == b'w':
            args['atomictemp'] = True
        return self.opener(self.indexfile, **args)

    def _datafp(self, mode=b'r'):
        """file object for the revlog's data file"""
        return self.opener(self.datafile, mode=mode)

    @contextlib.contextmanager
    def _datareadfp(self, existingfp=None):
        """file object suitable to read data"""
        # Use explicit file handle, if given.
        if existingfp is not None:
            yield existingfp

        # Use a file handle being actively used for writes, if available.
        # There is some danger to doing this because reads will seek the
        # file. However, _writeentry() performs a SEEK_END before all writes,
        # so we should be safe.
        elif self._writinghandles:
            if self._inline:
                yield self._writinghandles[0]
            else:
                yield self._writinghandles[1]

        # Otherwise open a new file handle.
        else:
            if self._inline:
                func = self._indexfp
            else:
                func = self._datafp
            with func() as fp:
                yield fp

    def tiprev(self):
        return len(self.index) - 1

    def tip(self):
        return self.node(self.tiprev())

    def __contains__(self, rev):
        return 0 <= rev < len(self)

    def __len__(self):
        return len(self.index)

    def __iter__(self):
        return iter(pycompat.xrange(len(self)))

    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        return storageutil.iterrevs(len(self), start=start, stop=stop)

    @property
    def nodemap(self):
        msg = (
            b"revlog.nodemap is deprecated, "
            b"use revlog.index.[has_node|rev|get_rev]"
        )
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap

    @property
    def _nodecache(self):
        msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (ex. LFS). This is because such flag processor can alter
        # the rawtext content that the delta will be based on, and two clients
        # could have the same revlog node with different flags (i.e. different
        # rawtext contents) and the delta could be incompatible.
        if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
            self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
        ):
            return False
        return True

    def clearcaches(self):
        self._revisioncache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, b'')
        self._pcache = {}
        self.index.clearcaches()

    def rev(self, node):
        try:
            return self.index.rev(node)
        except TypeError:
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise error.LookupError(node, self.indexfile, _(b'no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

    def length(self, rev):
        return self.index[rev][1]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.rawdata(rev)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev, raw=False))

    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        iterrev = rev
        base = index[iterrev][3]
        while base != iterrev:
            iterrev = base
            base = index[iterrev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

        return entry[5], entry[6]

    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7]  # map revisions to nodes inline

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped
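
    # Illustrative example (added for exposition, not part of the original
    # class): in a generaldelta revlog where rev 5 deltas against rev 3 and
    # rev 3 is a full snapshot (its base is itself), the walk above yields
    # the chain base-first::
    #
    #     chain, stopped = rl._deltachain(5)
    #     # chain == [3, 5], stopped is False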

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        if rustancestor is not None:
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        elif util.safehasattr(parsers, b'rustlazyancestors'):
            lazyancestors = ancestor.rustlazyancestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)

    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

        ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustancestor is not None:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
1222 # from roots.
1222 # from roots.
1223 # Mark this head as having been reached
1223 # Mark this head as having been reached
1224 heads[n] = True
1224 heads[n] = True
1225 elif ancestors is None:
1225 elif ancestors is None:
1226 # Otherwise, we're trying to discover the heads.
1226 # Otherwise, we're trying to discover the heads.
1227 # Assume this is a head because if it isn't, the next step
1227 # Assume this is a head because if it isn't, the next step
1228 # will eventually remove it.
1228 # will eventually remove it.
1229 heads[n] = True
1229 heads[n] = True
1230 # But, obviously its parents aren't.
1230 # But, obviously its parents aren't.
1231 for p in self.parents(n):
1231 for p in self.parents(n):
1232 heads.pop(p, None)
1232 heads.pop(p, None)
1233 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1233 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1234 roots = list(roots)
1234 roots = list(roots)
1235 assert orderedout
1235 assert orderedout
1236 assert roots
1236 assert roots
1237 assert heads
1237 assert heads
1238 return (orderedout, roots, heads)
1238 return (orderedout, roots, heads)
1239
1239
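    # A hedged, standalone sketch (not part of the original file) of the idea
    # the sweep above relies on: revision numbers are a topological order, so
    # a single forward pass over the parents can compute the descendants of a
    # set of roots.  ``parents`` (rev -> pair of parent revs, -1 for null) is
    # an assumed toy input:
    #
    #     def descendants_of(roots, tip, parents):
    #         seen = set(roots)
    #         for r in range(min(roots), tip + 1):
    #             if r in seen or any(p in seen for p in parents[r]):
    #                 seen.add(r)
    #         return seen
    #
    #     # parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}
    #     # descendants_of({1}, 3, parents) -> {1, 3}
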
    def headrevs(self, revs=None):
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        if rustdagop is not None:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at the start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]

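    # A minimal sketch (not from the original file) of the marking pass in
    # _headrevs, on an assumed toy DAG where rev 0 has two children:
    #
    #     parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1)}
    #     ishead = [1] * len(parents)
    #     for r, (p1, p2) in parents.items():
    #         for p in (p1, p2):
    #             if p != -1:
    #                 ishead[p] = 0  # a revision with a child is not a head
    #     # [r for r, v in enumerate(ishead) if v] -> [1, 2]
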
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = set(self.rev(n) for n in stop or [])

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError):  # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        reachableroots is not."""
        if a == nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
        return bool(self.reachableroots(a, [b], [a], includepath=False))

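    # Note (not from the original file): revision numbers form a topological
    # order, so an ancestor always has a smaller number than its descendants.
    # That is what makes the ``a > b`` early return above safe:
    #
    #     # isancestorrev(5, 3) -> False without calling reachableroots,
    #     # because rev 5 can never be an ancestor of rev 3
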
    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::(<roots> and <roots>::<heads>)))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            return self.index.reachableroots2(
                minroot, heads, roots, includepath
            )
        except AttributeError:
            return dagop._reachablerootspure(
                self.parentrevs, minroot, roots, heads, includepath
            )

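    # Note (not from the original file): in revset notation ``<roots>::<heads>``
    # is every revision lying on a path from a root to a head.  On the assumed
    # toy DAG 0 -> 1 -> 3 and 0 -> 2 -> 3:
    #
    #     # reachableroots(0, heads=[3], roots=[1], includepath=True)
    #     # -> {1, 3}; rev 2 is excluded because no path from root 1 to
    #     #    head 3 passes through it
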
    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass

    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids here as they should always be
        # full hashes
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.indexfile, _(b'ambiguous identifier')
                )
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[: l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if nullhex.startswith(id):
                    nl.append(nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.indexfile, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.indexfile, _(b'no match found'))

    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.indexfile, _(b'no node'))
            return True

        def maybewdir(prefix):
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != wdirid:
                    raise error.LookupError(node, self.indexfile, _(b'no node'))
            except AttributeError:
                # Fall through to pure code
                pass

        if node == wdirid:
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, 41):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)

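    # Note (not from the original file): the prefix grows one hex digit at a
    # time until only one node matches, and disambiguate() then grows it past
    # any all-'f' prefix so the result cannot be confused with the virtual
    # working-directory id.  With assumed nodes a1b2... and a1c3...:
    #
    #     # shortest(a1b2...) -> b'a1b'   (b'a1' is ambiguous)
    #     # shortest(ffe4...) -> b'ffe'   (b'ff' could mean wdir)
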
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (
            (offset + length + cachesize) & ~(cachesize - 1)
        ) - realoffset
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _(
                        b'partial read of revlog %s; expected %d bytes from '
                        b'offset %d, got %d'
                    )
                    % (
                        self.indexfile if self._inline else self.datafile,
                        length,
                        realoffset,
                        len(d) - startoffset,
                    )
                )

            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _(
                    b'partial read of revlog %s; expected %d bytes from offset '
                    b'%d, got %d'
                )
                % (
                    self.indexfile if self._inline else self.datafile,
                    length,
                    offset,
                    len(d),
                )
            )

        return d

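    # Worked example (not from the original file): the bit masks above assume
    # _chunkcachesize is a power of two.  With cachesize = 65536:
    #
    #     offset, length = 70000, 1000
    #     realoffset = 70000 & ~65535                             # 65536
    #     reallength = ((70000 + 1000 + 65536) & ~65535) - 65536  # 65536
    #
    # so one aligned 64KiB window is read and cached around the requested
    # 1000 bytes.
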
    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d  # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is used, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)

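    # Worked example (not from the original file): in an inline revlog, index
    # entries and data chunks are interleaved in one file, so a revision's
    # data offset must skip every index record written before it.  Assuming
    # the 64-byte v1 index entry size (self._io.size):
    #
    #     # data offset of rev 2 = start(2) + (2 + 1) * 64
    #
    # i.e. the three index entries for revs 0..2 precede rev 2's data.
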
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = deltautil.slicechunk(
                self, revs, targetsize=targetsize
            )

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l

    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, b'')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

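    # Note (not from the original file): without generaldelta a delta is
    # always against the previous revision in the file, so only the
    # "base == rev" (full snapshot) case matters; with generaldelta the base
    # field names an arbitrary earlier revision:
    #
    #     # index[rev][3] == rev -> stored as a full text, parent is nullrev
    #     # generaldelta         -> delta against index[rev][3]
    #     # otherwise            -> delta against rev - 1
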
    def issnapshot(self, rev):
        """tells whether rev is a snapshot"""
        if not self._sparserevlog:
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, b'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        p2 = entry[6]
        if base == p1 or base == p2:
            return False
        return self.issnapshot(base)

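    # Note (not from the original file): in a sparse revlog an intermediate
    # snapshot is a delta whose base is not one of its parents, and the
    # recursion above makes a revision a snapshot exactly when its base is:
    #
    #     # rev 7: base 4, parents (6, -1) -> base is not a parent, so
    #     # issnapshot(7) == issnapshot(4)
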
    def snapshotdepth(self, rev):
        """number of snapshots in the chain before this one"""
        if not self.issnapshot(rev):
            raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
        return len(self._deltachain(rev)[0]) - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))

    def _processflags(self, text, flags, operation, raw=False):
        """deprecated entry point to access flag processors"""
        msg = b'_processflag(...) use the specialized variant'
        util.nouideprecwarn(msg, b'5.2', stacklevel=2)
        if raw:
            return text, flagutil.processflagsraw(self, text, flags)
        elif operation == b'read':
            return flagutil.processflagsread(self, text, flags)
        else:  # write operation
            return flagutil.processflagswrite(self, text, flags)

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if raw:
            msg = (
                b'revlog.revision(..., raw=True) is deprecated, '
                b'use revlog.rawdata(...)'
            )
            util.nouideprecwarn(msg, b'5.2', stacklevel=2)
        return self._revisiondata(nodeorrev, _df, raw=raw)[0]

    def sidedata(self, nodeorrev, _df=None):
        """a map of extra data related to the changeset but not part of the hash

        This function currently returns a dictionary. However, a more advanced
        mapping object will likely be used in the future for more
        efficient/lazy code.
        """
        return self._revisiondata(nodeorrev, _df)[1]

    def _revisiondata(self, nodeorrev, _df=None, raw=False):
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        # fast path the special `nullid` rev
        if node == nullid:
            return b"", {}

        # ``rawtext`` is the text as stored inside the revlog. Might be the
        # revision or might need to be processed to retrieve the revision.
        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)

        if raw and validated:
            # if we don't want to process the raw text and the raw
            # text is cached, we can exit early.
            return rawtext, {}
        if rev is None:
            rev = self.rev(node)
        # the revlog's flags for this revision
        # (they usually alter its state or content)
        flags = self.flags(rev)

        if validated and flags == REVIDX_DEFAULT_FLAGS:
            # no extra flags set, no flag processor runs, text = rawtext
            return rawtext, {}

        sidedata = {}
        if raw:
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            try:
                r = flagutil.processflagsread(self, rawtext, flags)
            except error.SidedataHashError as exc:
                msg = _(b"integrity check failed on %s:%s sidedata key %d")
                msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey)
                raise error.RevlogError(msg)
            text, validatehash, sidedata = r
        if validatehash:
            self.checkhash(text, node, rev=rev)
        if not validated:
            self._revisioncache = (node, rev, rawtext)

        return text, sidedata

    def _rawtext(self, node, rev, _df=None):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            if self._revisioncache[0] == node:
                return (rev, self._revisioncache[2], True)
            cachedrev = self._revisioncache[1]

        if rev is None:
            rev = self.rev(node)

        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            basetext = self._revisioncache[2]

        # drop cache to save memory, the caller is expected to
        # update self._revisioncache after validating the text
        self._revisioncache = None

        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            targetsize = 4 * rawsize

        bins = self._chunks(chain, df=_df, targetsize=targetsize)
        if basetext is None:
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext  # let us have a chance to free memory early
        return (rev, rawtext, False)

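    # A minimal sketch (not from the original file) of the reconstruction
    # above: start from a base text and fold each binary delta in, which is
    # what mdiff.patches(basetext, bins) does in one call.  ``apply_patch``
    # is an assumed helper applying a single delta:
    #
    #     def rebuild(base, deltas, apply_patch):
    #         text = base
    #         for delta in deltas:
    #             text = apply_patch(text, delta)
    #         return text
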
    def rawdata(self, nodeorrev, _df=None):
        """return the uncompressed raw data of a given node or revision number.

        _df - an existing file handle to read from. (internal-only)
        """
        return self._revisiondata(nodeorrev, _df, raw=True)[0]

    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return storageutil.hashrevisionsha1(text, p1, p2)

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(
                    _(b"integrity check failed on %s:%s")
                    % (self.indexfile, pycompat.bytestr(revornode))
                )
        except error.RevlogError:
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.indexfile, node, text)
            raise

    def _enforceinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        if (
            not self._inline
            or (self.start(tiprev) + self.length(tiprev)) < _maxinline
        ):
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self.indexfile
            )

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(tiprev)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None

        with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
            for r in self:
                dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])

        with self._indexfp(b'w') as fp:
            self.version &= ~FLAG_INLINE_DATA
            self._inline = False
            io = self._io
            for i in self:
                e = io.packentry(self.index[i], self.node, self.version, i)
                fp.write(e)

            # the temp file replaces the real index when we exit the context
            # manager

        tr.replace(self.indexfile, trindex * self._io.size)
        nodemaputil.setup_persistent_nodemap(tr, self)
        self._chunkclear()

2008 def _nodeduplicatecallback(self, transaction, node):
2008 def _nodeduplicatecallback(self, transaction, node):
2009 """called when trying to add a node already stored.
2009 """called when trying to add a node already stored.
2010 """
2010 """
2011
2011
2012 def addrevision(
2012 def addrevision(
2013 self,
2013 self,
2014 text,
2014 text,
2015 transaction,
2015 transaction,
2016 link,
2016 link,
2017 p1,
2017 p1,
2018 p2,
2018 p2,
2019 cachedelta=None,
2019 cachedelta=None,
2020 node=None,
2020 node=None,
2021 flags=REVIDX_DEFAULT_FLAGS,
2021 flags=REVIDX_DEFAULT_FLAGS,
2022 deltacomputer=None,
2022 deltacomputer=None,
2023 sidedata=None,
2023 sidedata=None,
2024 ):
2024 ):
2025 """add a revision to the log
2025 """add a revision to the log
2026
2026
2027 text - the revision data to add
2027 text - the revision data to add
2028 transaction - the transaction object used for rollback
2028 transaction - the transaction object used for rollback
2029 link - the linkrev data to add
2029 link - the linkrev data to add
2030 p1, p2 - the parent nodeids of the revision
2030 p1, p2 - the parent nodeids of the revision
2031 cachedelta - an optional precomputed delta
2031 cachedelta - an optional precomputed delta
2032 node - nodeid of revision; typically node is not specified, and it is
2032 node - nodeid of revision; typically node is not specified, and it is
2033 computed by default as hash(text, p1, p2), however subclasses might
2033 computed by default as hash(text, p1, p2), however subclasses might
2034 use different hashing method (and override checkhash() in such case)
2034 use different hashing method (and override checkhash() in such case)
2035 flags - the known flags to set on the revision
2035 flags - the known flags to set on the revision
2036 deltacomputer - an optional deltacomputer instance shared between
2036 deltacomputer - an optional deltacomputer instance shared between
2037 multiple calls
2037 multiple calls
2038 """
2038 """
2039 if link == nullrev:
2039 if link == nullrev:
2040 raise error.RevlogError(
2040 raise error.RevlogError(
2041 _(b"attempted to add linkrev -1 to %s") % self.indexfile
2041 _(b"attempted to add linkrev -1 to %s") % self.indexfile
2042 )
2042 )
2043
2043
2044 if sidedata is None:
2044 if sidedata is None:
2045 sidedata = {}
2045 sidedata = {}
2046 flags = flags & ~REVIDX_SIDEDATA
2046 flags = flags & ~REVIDX_SIDEDATA
2047 elif not self.hassidedata:
2047 elif not self.hassidedata:
2048 raise error.ProgrammingError(
2048 raise error.ProgrammingError(
2049 _(b"trying to add sidedata to a revlog who don't support them")
2049 _(b"trying to add sidedata to a revlog who don't support them")
2050 )
2050 )
2051 else:
2051 else:
2052 flags |= REVIDX_SIDEDATA
2052 flags |= REVIDX_SIDEDATA
2053
2053
2054 if flags:
2054 if flags:
2055 node = node or self.hash(text, p1, p2)
2055 node = node or self.hash(text, p1, p2)
2056
2056
2057 rawtext, validatehash = flagutil.processflagswrite(
2057 rawtext, validatehash = flagutil.processflagswrite(
2058 self, text, flags, sidedata=sidedata
2058 self, text, flags, sidedata=sidedata
2059 )
2059 )
2060
2060
2061 # If the flag processor modifies the revision data, ignore any provided
2061 # If the flag processor modifies the revision data, ignore any provided
2062 # cachedelta.
2062 # cachedelta.
2063 if rawtext != text:
2063 if rawtext != text:
2064 cachedelta = None
2064 cachedelta = None
2065
2065
2066 if len(rawtext) > _maxentrysize:
2066 if len(rawtext) > _maxentrysize:
2067 raise error.RevlogError(
2067 raise error.RevlogError(
2068 _(
2068 _(
2069 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2069 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2070 )
2070 )
2071 % (self.indexfile, len(rawtext))
2071 % (self.indexfile, len(rawtext))
2072 )
2072 )
2073
2073
2074 node = node or self.hash(rawtext, p1, p2)
2074 node = node or self.hash(rawtext, p1, p2)
2075 if self.index.has_node(node):
2075 if self.index.has_node(node):
2076 return node
2076 return node
2077
2077
2078 if validatehash:
2078 if validatehash:
2079 self.checkhash(rawtext, node, p1=p1, p2=p2)
2079 self.checkhash(rawtext, node, p1=p1, p2=p2)
2080
2080
2081 return self.addrawrevision(
2081 return self.addrawrevision(
2082 rawtext,
2082 rawtext,
2083 transaction,
2083 transaction,
2084 link,
2084 link,
2085 p1,
2085 p1,
2086 p2,
2086 p2,
2087 node,
2087 node,
2088 flags,
2088 flags,
2089 cachedelta=cachedelta,
2089 cachedelta=cachedelta,
2090 deltacomputer=deltacomputer,
2090 deltacomputer=deltacomputer,
2091 )
2091 )
2092
2092
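# reviewer note (illustrative sketch, not from the patch): how a caller
# might drive addrevision(), assuming `rl` is a writable revlog and `tr`
# an open transaction; all names here are invented:
#
#     node = rl.addrevision(b'file content', tr, linkrev, p1node, p2node)
#
# adding the same (text, p1, p2) again simply returns the existing node,
# thanks to the index.has_node() check above.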
2093 def addrawrevision(
2093 def addrawrevision(
2094 self,
2094 self,
2095 rawtext,
2095 rawtext,
2096 transaction,
2096 transaction,
2097 link,
2097 link,
2098 p1,
2098 p1,
2099 p2,
2099 p2,
2100 node,
2100 node,
2101 flags,
2101 flags,
2102 cachedelta=None,
2102 cachedelta=None,
2103 deltacomputer=None,
2103 deltacomputer=None,
2104 ):
2104 ):
2105 """add a raw revision with known flags, node and parents
2105 """add a raw revision with known flags, node and parents
2106 useful when reusing a revision not stored in this revlog (e.g. received
2106 useful when reusing a revision not stored in this revlog (e.g. received
2107 over the wire, or read from an external bundle).
2107 over the wire, or read from an external bundle).
2108 """
2108 """
2109 dfh = None
2109 dfh = None
2110 if not self._inline:
2110 if not self._inline:
2111 dfh = self._datafp(b"a+")
2111 dfh = self._datafp(b"a+")
2112 ifh = self._indexfp(b"a+")
2112 ifh = self._indexfp(b"a+")
2113 try:
2113 try:
2114 return self._addrevision(
2114 return self._addrevision(
2115 node,
2115 node,
2116 rawtext,
2116 rawtext,
2117 transaction,
2117 transaction,
2118 link,
2118 link,
2119 p1,
2119 p1,
2120 p2,
2120 p2,
2121 flags,
2121 flags,
2122 cachedelta,
2122 cachedelta,
2123 ifh,
2123 ifh,
2124 dfh,
2124 dfh,
2125 deltacomputer=deltacomputer,
2125 deltacomputer=deltacomputer,
2126 )
2126 )
2127 finally:
2127 finally:
2128 if dfh:
2128 if dfh:
2129 dfh.close()
2129 dfh.close()
2130 ifh.close()
2130 ifh.close()
2131
2131
2132 def compress(self, data):
2132 def compress(self, data):
2133 """Generate a possibly-compressed representation of data."""
2133 """Generate a possibly-compressed representation of data."""
2134 if not data:
2134 if not data:
2135 return b'', data
2135 return b'', data
2136
2136
2137 compressed = self._compressor.compress(data)
2137 compressed = self._compressor.compress(data)
2138
2138
2139 if compressed:
2139 if compressed:
2140 # The revlog compressor added the header in the returned data.
2140 # The revlog compressor added the header in the returned data.
2141 return b'', compressed
2141 return b'', compressed
2142
2142
2143 if data[0:1] == b'\0':
2143 if data[0:1] == b'\0':
2144 return b'', data
2144 return b'', data
2145 return b'u', data
2145 return b'u', data
2146
2146
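# reviewer note (illustrative, not from the patch): compress() returns a
# (header, data) pair: an empty header means the compressor embedded its
# own header inside `data`, b'u' marks data stored uncompressed, and data
# that already starts with b'\0' needs no marker. A hypothetical
# round-trip with an invented revlog `rl`:
#
#     header, packed = rl.compress(b'some revision text')
#     assert rl.decompress(header + packed) == b'some revision text'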
2147 def decompress(self, data):
2147 def decompress(self, data):
2148 """Decompress a revlog chunk.
2148 """Decompress a revlog chunk.
2149
2149
2150 The chunk is expected to begin with a header identifying the
2150 The chunk is expected to begin with a header identifying the
2151 format type so it can be routed to an appropriate decompressor.
2151 format type so it can be routed to an appropriate decompressor.
2152 """
2152 """
2153 if not data:
2153 if not data:
2154 return data
2154 return data
2155
2155
2156 # Revlogs are read much more frequently than they are written and many
2156 # Revlogs are read much more frequently than they are written and many
2157 # chunks only take microseconds to decompress, so performance is
2157 # chunks only take microseconds to decompress, so performance is
2158 # important here.
2158 # important here.
2159 #
2159 #
2160 # We can make a few assumptions about revlogs:
2160 # We can make a few assumptions about revlogs:
2161 #
2161 #
2162 # 1) the majority of chunks will be compressed (as opposed to inline
2162 # 1) the majority of chunks will be compressed (as opposed to inline
2163 # raw data).
2163 # raw data).
2164 # 2) decompressing *any* data will likely be at least 10x slower than
2164 # 2) decompressing *any* data will likely be at least 10x slower than
2165 # returning raw inline data.
2165 # returning raw inline data.
2166 # 3) we want to prioritize common and officially supported compression
2166 # 3) we want to prioritize common and officially supported compression
2167 # engines
2167 # engines
2168 #
2168 #
2169 # It follows that we want to optimize for "decompress compressed data
2169 # It follows that we want to optimize for "decompress compressed data
2170 # when encoded with common and officially supported compression engines"
2170 # when encoded with common and officially supported compression engines"
2171 # case over "raw data" and "data encoded by less common or non-official
2171 # case over "raw data" and "data encoded by less common or non-official
2172 # compression engines." That is why we have the inline lookup first
2172 # compression engines." That is why we have the inline lookup first
2173 # followed by the compengines lookup.
2173 # followed by the compengines lookup.
2174 #
2174 #
2175 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2175 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2176 # compressed chunks. And this matters for changelog and manifest reads.
2176 # compressed chunks. And this matters for changelog and manifest reads.
2177 t = data[0:1]
2177 t = data[0:1]
2178
2178
2179 if t == b'x':
2179 if t == b'x':
2180 try:
2180 try:
2181 return _zlibdecompress(data)
2181 return _zlibdecompress(data)
2182 except zlib.error as e:
2182 except zlib.error as e:
2183 raise error.RevlogError(
2183 raise error.RevlogError(
2184 _(b'revlog decompress error: %s')
2184 _(b'revlog decompress error: %s')
2185 % stringutil.forcebytestr(e)
2185 % stringutil.forcebytestr(e)
2186 )
2186 )
2187 # '\0' is more common than 'u' so it goes first.
2187 # '\0' is more common than 'u' so it goes first.
2188 elif t == b'\0':
2188 elif t == b'\0':
2189 return data
2189 return data
2190 elif t == b'u':
2190 elif t == b'u':
2191 return util.buffer(data, 1)
2191 return util.buffer(data, 1)
2192
2192
2193 try:
2193 try:
2194 compressor = self._decompressors[t]
2194 compressor = self._decompressors[t]
2195 except KeyError:
2195 except KeyError:
2196 try:
2196 try:
2197 engine = util.compengines.forrevlogheader(t)
2197 engine = util.compengines.forrevlogheader(t)
2198 compressor = engine.revlogcompressor(self._compengineopts)
2198 compressor = engine.revlogcompressor(self._compengineopts)
2199 self._decompressors[t] = compressor
2199 self._decompressors[t] = compressor
2200 except KeyError:
2200 except KeyError:
2201 raise error.RevlogError(_(b'unknown compression type %r') % t)
2201 raise error.RevlogError(_(b'unknown compression type %r') % t)
2202
2202
2203 return compressor.decompress(data)
2203 return compressor.decompress(data)
2204
2204
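# reviewer note (summary of the code above, not from the patch): the first
# byte of a chunk routes it to a decompressor, roughly:
#
#     b'x'  -> zlib, checked first as the historical default
#     b'\0' -> raw data, returned as-is
#     b'u'  -> uncompressed, the marker byte is stripped
#     other -> resolved via util.compengines.forrevlogheader(), with the
#              resulting compressor cached in self._decompressors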
2205 def _addrevision(
2205 def _addrevision(
2206 self,
2206 self,
2207 node,
2207 node,
2208 rawtext,
2208 rawtext,
2209 transaction,
2209 transaction,
2210 link,
2210 link,
2211 p1,
2211 p1,
2212 p2,
2212 p2,
2213 flags,
2213 flags,
2214 cachedelta,
2214 cachedelta,
2215 ifh,
2215 ifh,
2216 dfh,
2216 dfh,
2217 alwayscache=False,
2217 alwayscache=False,
2218 deltacomputer=None,
2218 deltacomputer=None,
2219 ):
2219 ):
2220 """internal function to add revisions to the log
2220 """internal function to add revisions to the log
2221
2221
2222 see addrevision for argument descriptions.
2222 see addrevision for argument descriptions.
2223
2223
2224 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2224 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2225
2225
2226 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2226 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2227 be used.
2227 be used.
2228
2228
2229 invariants:
2229 invariants:
2230 - rawtext is optional (can be None); if not set, cachedelta must be set.
2230 - rawtext is optional (can be None); if not set, cachedelta must be set.
2231 if both are set, they must correspond to each other.
2231 if both are set, they must correspond to each other.
2232 """
2232 """
2233 if node == nullid:
2233 if node == nullid:
2234 raise error.RevlogError(
2234 raise error.RevlogError(
2235 _(b"%s: attempt to add null revision") % self.indexfile
2235 _(b"%s: attempt to add null revision") % self.indexfile
2236 )
2236 )
2237 if node == wdirid or node in wdirfilenodeids:
2237 if node == wdirid or node in wdirfilenodeids:
2238 raise error.RevlogError(
2238 raise error.RevlogError(
2239 _(b"%s: attempt to add wdir revision") % self.indexfile
2239 _(b"%s: attempt to add wdir revision") % self.indexfile
2240 )
2240 )
2241
2241
2242 if self._inline:
2242 if self._inline:
2243 fh = ifh
2243 fh = ifh
2244 else:
2244 else:
2245 fh = dfh
2245 fh = dfh
2246
2246
2247 btext = [rawtext]
2247 btext = [rawtext]
2248
2248
2249 curr = len(self)
2249 curr = len(self)
2250 prev = curr - 1
2250 prev = curr - 1
2251 offset = self.end(prev)
2251 offset = self.end(prev)
2252 p1r, p2r = self.rev(p1), self.rev(p2)
2252 p1r, p2r = self.rev(p1), self.rev(p2)
2253
2253
2254 # full versions are inserted when the needed deltas
2254 # full versions are inserted when the needed deltas
2255 # become comparable to the uncompressed text
2255 # become comparable to the uncompressed text
2256 if rawtext is None:
2256 if rawtext is None:
2257 # need the rawtext size before it is changed by flag processors, which is
2257 # need the rawtext size before it is changed by flag processors, which is
2258 # the non-raw size. use revlog explicitly to avoid filelog's extra
2258 # the non-raw size. use revlog explicitly to avoid filelog's extra
2259 # logic that might remove metadata size.
2259 # logic that might remove metadata size.
2260 textlen = mdiff.patchedsize(
2260 textlen = mdiff.patchedsize(
2261 revlog.size(self, cachedelta[0]), cachedelta[1]
2261 revlog.size(self, cachedelta[0]), cachedelta[1]
2262 )
2262 )
2263 else:
2263 else:
2264 textlen = len(rawtext)
2264 textlen = len(rawtext)
2265
2265
2266 if deltacomputer is None:
2266 if deltacomputer is None:
2267 deltacomputer = deltautil.deltacomputer(self)
2267 deltacomputer = deltautil.deltacomputer(self)
2268
2268
2269 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2269 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2270
2270
2271 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2271 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2272
2272
2273 e = (
2273 e = (
2274 offset_type(offset, flags),
2274 offset_type(offset, flags),
2275 deltainfo.deltalen,
2275 deltainfo.deltalen,
2276 textlen,
2276 textlen,
2277 deltainfo.base,
2277 deltainfo.base,
2278 link,
2278 link,
2279 p1r,
2279 p1r,
2280 p2r,
2280 p2r,
2281 node,
2281 node,
2282 )
2282 )
2283 self.index.append(e)
2283 self.index.append(e)
2284
2284
2285 entry = self._io.packentry(e, self.node, self.version, curr)
2285 entry = self._io.packentry(e, self.node, self.version, curr)
2286 self._writeentry(
2286 self._writeentry(
2287 transaction, ifh, dfh, entry, deltainfo.data, link, offset
2287 transaction, ifh, dfh, entry, deltainfo.data, link, offset
2288 )
2288 )
2289
2289
2290 rawtext = btext[0]
2290 rawtext = btext[0]
2291
2291
2292 if alwayscache and rawtext is None:
2292 if alwayscache and rawtext is None:
2293 rawtext = deltacomputer.buildtext(revinfo, fh)
2293 rawtext = deltacomputer.buildtext(revinfo, fh)
2294
2294
2295 if type(rawtext) == bytes: # only accept immutable objects
2295 if type(rawtext) == bytes: # only accept immutable objects
2296 self._revisioncache = (node, curr, rawtext)
2296 self._revisioncache = (node, curr, rawtext)
2297 self._chainbasecache[curr] = deltainfo.chainbase
2297 self._chainbasecache[curr] = deltainfo.chainbase
2298 return node
2298 return node
2299
2299
2300 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2300 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2301 # Files opened in a+ mode have inconsistent behavior on various
2301 # Files opened in a+ mode have inconsistent behavior on various
2302 # platforms. Windows requires that a file positioning call be made
2302 # platforms. Windows requires that a file positioning call be made
2303 # when the file handle transitions between reads and writes. See
2303 # when the file handle transitions between reads and writes. See
2304 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2304 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2305 # platforms, Python or the platform itself can be buggy. Some versions
2305 # platforms, Python or the platform itself can be buggy. Some versions
2306 # of Solaris have been observed to not append at the end of the file
2306 # of Solaris have been observed to not append at the end of the file
2307 # if the file was seeked to before the end. See issue4943 for more.
2307 # if the file was seeked to before the end. See issue4943 for more.
2308 #
2308 #
2309 # We work around this issue by inserting a seek() before writing.
2309 # We work around this issue by inserting a seek() before writing.
2310 # Note: This is likely not necessary on Python 3. However, because
2310 # Note: This is likely not necessary on Python 3. However, because
2311 # the file handle is reused for reads and may be seeked there, we need
2311 # the file handle is reused for reads and may be seeked there, we need
2312 # to be careful before changing this.
2312 # to be careful before changing this.
2313 ifh.seek(0, os.SEEK_END)
2313 ifh.seek(0, os.SEEK_END)
2314 if dfh:
2314 if dfh:
2315 dfh.seek(0, os.SEEK_END)
2315 dfh.seek(0, os.SEEK_END)
2316
2316
2317 curr = len(self) - 1
2317 curr = len(self) - 1
2318 if not self._inline:
2318 if not self._inline:
2319 transaction.add(self.datafile, offset)
2319 transaction.add(self.datafile, offset)
2320 transaction.add(self.indexfile, curr * len(entry))
2320 transaction.add(self.indexfile, curr * len(entry))
2321 if data[0]:
2321 if data[0]:
2322 dfh.write(data[0])
2322 dfh.write(data[0])
2323 dfh.write(data[1])
2323 dfh.write(data[1])
2324 ifh.write(entry)
2324 ifh.write(entry)
2325 else:
2325 else:
2326 offset += curr * self._io.size
2326 offset += curr * self._io.size
2327 transaction.add(self.indexfile, offset, curr)
2327 transaction.add(self.indexfile, offset, curr)
2328 ifh.write(entry)
2328 ifh.write(entry)
2329 ifh.write(data[0])
2329 ifh.write(data[0])
2330 ifh.write(data[1])
2330 ifh.write(data[1])
2331 self._enforceinlinesize(transaction, ifh)
2331 self._enforceinlinesize(transaction, ifh)
2332 nodemaputil.setup_persistent_nodemap(transaction, self)
2332 nodemaputil.setup_persistent_nodemap(transaction, self)
2333
2333
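# reviewer note (schematic, not from the patch): the two on-disk layouts
# the branches above handle:
#
#     non-inline: .i -> |entry|entry|...      .d -> |data|data|...
#     inline:     .i -> |entry|data|entry|data|...   (no .d file)
#
# which is why the inline branch funnels both entry and data through the
# index handle and then calls _enforceinlinesize().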
2334 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2334 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2335 """
2335 """
2336 add a delta group
2336 add a delta group
2337
2337
2338 given a set of deltas, add them to the revision log. the
2338 given a set of deltas, add them to the revision log. the
2339 first delta is against its parent, which should be in our
2339 first delta is against its parent, which should be in our
2340 log; the rest are against the previous delta.
2340 log; the rest are against the previous delta.
2341
2341
2342 If ``addrevisioncb`` is defined, it will be called with arguments of
2342 If ``addrevisioncb`` is defined, it will be called with arguments of
2343 this revlog and the node that was added.
2343 this revlog and the node that was added.
2344 """
2344 """
2345
2345
2346 if self._writinghandles:
2346 if self._writinghandles:
2347 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2347 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2348
2348
2349 nodes = []
2349 nodes = []
2350
2350
2351 r = len(self)
2351 r = len(self)
2352 end = 0
2352 end = 0
2353 if r:
2353 if r:
2354 end = self.end(r - 1)
2354 end = self.end(r - 1)
2355 ifh = self._indexfp(b"a+")
2355 ifh = self._indexfp(b"a+")
2356 isize = r * self._io.size
2356 isize = r * self._io.size
2357 if self._inline:
2357 if self._inline:
2358 transaction.add(self.indexfile, end + isize, r)
2358 transaction.add(self.indexfile, end + isize, r)
2359 dfh = None
2359 dfh = None
2360 else:
2360 else:
2361 transaction.add(self.indexfile, isize, r)
2361 transaction.add(self.indexfile, isize, r)
2362 transaction.add(self.datafile, end)
2362 transaction.add(self.datafile, end)
2363 dfh = self._datafp(b"a+")
2363 dfh = self._datafp(b"a+")
2364
2364
2365 def flush():
2365 def flush():
2366 if dfh:
2366 if dfh:
2367 dfh.flush()
2367 dfh.flush()
2368 ifh.flush()
2368 ifh.flush()
2369
2369
2370 self._writinghandles = (ifh, dfh)
2370 self._writinghandles = (ifh, dfh)
2371
2371
2372 try:
2372 try:
2373 deltacomputer = deltautil.deltacomputer(self)
2373 deltacomputer = deltautil.deltacomputer(self)
2374 # loop through our set of deltas
2374 # loop through our set of deltas
2375 for data in deltas:
2375 for data in deltas:
2376 node, p1, p2, linknode, deltabase, delta, flags = data
2376 node, p1, p2, linknode, deltabase, delta, flags = data
2377 link = linkmapper(linknode)
2377 link = linkmapper(linknode)
2378 flags = flags or REVIDX_DEFAULT_FLAGS
2378 flags = flags or REVIDX_DEFAULT_FLAGS
2379
2379
2380 nodes.append(node)
2380 nodes.append(node)
2381
2381
2382 if self.index.has_node(node):
2382 if self.index.has_node(node):
2383 self._nodeduplicatecallback(transaction, node)
2383 self._nodeduplicatecallback(transaction, node)
2384 # this can happen if two branches make the same change
2384 # this can happen if two branches make the same change
2385 continue
2385 continue
2386
2386
2387 for p in (p1, p2):
2387 for p in (p1, p2):
2388 if not self.index.has_node(p):
2388 if not self.index.has_node(p):
2389 raise error.LookupError(
2389 raise error.LookupError(
2390 p, self.indexfile, _(b'unknown parent')
2390 p, self.indexfile, _(b'unknown parent')
2391 )
2391 )
2392
2392
2393 if not self.index.has_node(deltabase):
2393 if not self.index.has_node(deltabase):
2394 raise error.LookupError(
2394 raise error.LookupError(
2395 deltabase, self.indexfile, _(b'unknown delta base')
2395 deltabase, self.indexfile, _(b'unknown delta base')
2396 )
2396 )
2397
2397
2398 baserev = self.rev(deltabase)
2398 baserev = self.rev(deltabase)
2399
2399
2400 if baserev != nullrev and self.iscensored(baserev):
2400 if baserev != nullrev and self.iscensored(baserev):
2401 # if base is censored, delta must be full replacement in a
2401 # if base is censored, delta must be full replacement in a
2402 # single patch operation
2402 # single patch operation
2403 hlen = struct.calcsize(b">lll")
2403 hlen = struct.calcsize(b">lll")
2404 oldlen = self.rawsize(baserev)
2404 oldlen = self.rawsize(baserev)
2405 newlen = len(delta) - hlen
2405 newlen = len(delta) - hlen
2406 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2406 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2407 raise error.CensoredBaseError(
2407 raise error.CensoredBaseError(
2408 self.indexfile, self.node(baserev)
2408 self.indexfile, self.node(baserev)
2409 )
2409 )
2410
2410
2411 if not flags and self._peek_iscensored(baserev, delta, flush):
2411 if not flags and self._peek_iscensored(baserev, delta, flush):
2412 flags |= REVIDX_ISCENSORED
2412 flags |= REVIDX_ISCENSORED
2413
2413
2414 # We assume consumers of addrevisioncb will want to retrieve
2414 # We assume consumers of addrevisioncb will want to retrieve
2415 # the added revision, which will require a call to
2415 # the added revision, which will require a call to
2416 # revision(). revision() will fast path if there is a cache
2416 # revision(). revision() will fast path if there is a cache
2417 # hit. So, we tell _addrevision() to always cache in this case.
2417 # hit. So, we tell _addrevision() to always cache in this case.
2418 # We're only using addgroup() in the context of changegroup
2418 # We're only using addgroup() in the context of changegroup
2419 # generation so the revision data can always be handled as raw
2419 # generation so the revision data can always be handled as raw
2420 # by the flagprocessor.
2420 # by the flagprocessor.
2421 self._addrevision(
2421 self._addrevision(
2422 node,
2422 node,
2423 None,
2423 None,
2424 transaction,
2424 transaction,
2425 link,
2425 link,
2426 p1,
2426 p1,
2427 p2,
2427 p2,
2428 flags,
2428 flags,
2429 (baserev, delta),
2429 (baserev, delta),
2430 ifh,
2430 ifh,
2431 dfh,
2431 dfh,
2432 alwayscache=bool(addrevisioncb),
2432 alwayscache=bool(addrevisioncb),
2433 deltacomputer=deltacomputer,
2433 deltacomputer=deltacomputer,
2434 )
2434 )
2435
2435
2436 if addrevisioncb:
2436 if addrevisioncb:
2437 addrevisioncb(self, node)
2437 addrevisioncb(self, node)
2438
2438
2439 if not dfh and not self._inline:
2439 if not dfh and not self._inline:
2440 # addrevision switched from inline to conventional
2440 # addrevision switched from inline to conventional
2441 # reopen the index
2441 # reopen the index
2442 ifh.close()
2442 ifh.close()
2443 dfh = self._datafp(b"a+")
2443 dfh = self._datafp(b"a+")
2444 ifh = self._indexfp(b"a+")
2444 ifh = self._indexfp(b"a+")
2445 self._writinghandles = (ifh, dfh)
2445 self._writinghandles = (ifh, dfh)
2446 finally:
2446 finally:
2447 self._writinghandles = None
2447 self._writinghandles = None
2448
2448
2449 if dfh:
2449 if dfh:
2450 dfh.close()
2450 dfh.close()
2451 ifh.close()
2451 ifh.close()
2452
2452
2453 return nodes
2453 return nodes
2454
2454
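# reviewer note (illustrative sketch, not from the patch): each element of
# `deltas` unpacks as a 7-tuple, so a caller could feed a group in like
# this, with every name invented:
#
#     deltas = [(node, p1, p2, linknode, deltabase, delta, flags)]
#     added = rl.addgroup(deltas, linkmapper, tr,
#                         addrevisioncb=lambda rl, n: seen.append(n))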
2455 def iscensored(self, rev):
2455 def iscensored(self, rev):
2456 """Check if a file revision is censored."""
2456 """Check if a file revision is censored."""
2457 if not self._censorable:
2457 if not self._censorable:
2458 return False
2458 return False
2459
2459
2460 return self.flags(rev) & REVIDX_ISCENSORED
2460 return self.flags(rev) & REVIDX_ISCENSORED
2461
2461
2462 def _peek_iscensored(self, baserev, delta, flush):
2462 def _peek_iscensored(self, baserev, delta, flush):
2463 """Quickly check if a delta produces a censored revision."""
2463 """Quickly check if a delta produces a censored revision."""
2464 if not self._censorable:
2464 if not self._censorable:
2465 return False
2465 return False
2466
2466
2467 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2467 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2468
2468
2469 def getstrippoint(self, minlink):
2469 def getstrippoint(self, minlink):
2470 """find the minimum rev that must be stripped to strip the linkrev
2470 """find the minimum rev that must be stripped to strip the linkrev
2471
2471
2472 Returns a tuple containing the minimum rev and a set of all revs that
2472 Returns a tuple containing the minimum rev and a set of all revs that
2473 have linkrevs that will be broken by this strip.
2473 have linkrevs that will be broken by this strip.
2474 """
2474 """
2475 return storageutil.resolvestripinfo(
2475 return storageutil.resolvestripinfo(
2476 minlink,
2476 minlink,
2477 len(self) - 1,
2477 len(self) - 1,
2478 self.headrevs(),
2478 self.headrevs(),
2479 self.linkrev,
2479 self.linkrev,
2480 self.parentrevs,
2480 self.parentrevs,
2481 )
2481 )
2482
2482
2483 def strip(self, minlink, transaction):
2483 def strip(self, minlink, transaction):
2484 """truncate the revlog on the first revision with a linkrev >= minlink
2484 """truncate the revlog on the first revision with a linkrev >= minlink
2485
2485
2486 This function is called when we're stripping revision minlink and
2486 This function is called when we're stripping revision minlink and
2487 its descendants from the repository.
2487 its descendants from the repository.
2488
2488
2489 We have to remove all revisions with linkrev >= minlink, because
2489 We have to remove all revisions with linkrev >= minlink, because
2490 the equivalent changelog revisions will be renumbered after the
2490 the equivalent changelog revisions will be renumbered after the
2491 strip.
2491 strip.
2492
2492
2493 So we truncate the revlog on the first of these revisions, and
2493 So we truncate the revlog on the first of these revisions, and
2494 trust that the caller has saved the revisions that shouldn't be
2494 trust that the caller has saved the revisions that shouldn't be
2495 removed and that it'll re-add them after this truncation.
2495 removed and that it'll re-add them after this truncation.
2496 """
2496 """
2497 if len(self) == 0:
2497 if len(self) == 0:
2498 return
2498 return
2499
2499
2500 rev, _ = self.getstrippoint(minlink)
2500 rev, _ = self.getstrippoint(minlink)
2501 if rev == len(self):
2501 if rev == len(self):
2502 return
2502 return
2503
2503
2504 # first truncate the files on disk
2504 # first truncate the files on disk
2505 end = self.start(rev)
2505 end = self.start(rev)
2506 if not self._inline:
2506 if not self._inline:
2507 transaction.add(self.datafile, end)
2507 transaction.add(self.datafile, end)
2508 end = rev * self._io.size
2508 end = rev * self._io.size
2509 else:
2509 else:
2510 end += rev * self._io.size
2510 end += rev * self._io.size
2511
2511
2512 transaction.add(self.indexfile, end)
2512 transaction.add(self.indexfile, end)
2513
2513
2514 # then reset internal state in memory to forget those revisions
2514 # then reset internal state in memory to forget those revisions
2515 self._revisioncache = None
2515 self._revisioncache = None
2516 self._chaininfocache = {}
2516 self._chaininfocache = {}
2517 self._chunkclear()
2517 self._chunkclear()
2518
2518
2519 del self.index[rev:-1]
2519 del self.index[rev:-1]
2520
2520
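# reviewer note (spelling out the arithmetic above, not from the patch),
# writing `entry_size` for self._io.size:
#
#     non-inline: .d truncated at start(rev), .i at rev * entry_size
#     inline:     .i truncated at start(rev) + rev * entry_size, since
#                 every surviving revision contributes one index entry
#                 plus its data to the same file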
2521 def checksize(self):
2521 def checksize(self):
2522 """Check size of index and data files
2522 """Check size of index and data files
2523
2523
2524 return a (dd, di) tuple.
2524 return a (dd, di) tuple.
2525 - dd: extra bytes for the "data" file
2525 - dd: extra bytes for the "data" file
2526 - di: extra bytes for the "index" file
2526 - di: extra bytes for the "index" file
2527
2527
2528 A healthy revlog will return (0, 0).
2528 A healthy revlog will return (0, 0).
2529 """
2529 """
2530 expected = 0
2530 expected = 0
2531 if len(self):
2531 if len(self):
2532 expected = max(0, self.end(len(self) - 1))
2532 expected = max(0, self.end(len(self) - 1))
2533
2533
2534 try:
2534 try:
2535 with self._datafp() as f:
2535 with self._datafp() as f:
2536 f.seek(0, io.SEEK_END)
2536 f.seek(0, io.SEEK_END)
2537 actual = f.tell()
2537 actual = f.tell()
2538 dd = actual - expected
2538 dd = actual - expected
2539 except IOError as inst:
2539 except IOError as inst:
2540 if inst.errno != errno.ENOENT:
2540 if inst.errno != errno.ENOENT:
2541 raise
2541 raise
2542 dd = 0
2542 dd = 0
2543
2543
2544 try:
2544 try:
2545 f = self.opener(self.indexfile)
2545 f = self.opener(self.indexfile)
2546 f.seek(0, io.SEEK_END)
2546 f.seek(0, io.SEEK_END)
2547 actual = f.tell()
2547 actual = f.tell()
2548 f.close()
2548 f.close()
2549 s = self._io.size
2549 s = self._io.size
2550 i = max(0, actual // s)
2550 i = max(0, actual // s)
2551 di = actual - (i * s)
2551 di = actual - (i * s)
2552 if self._inline:
2552 if self._inline:
2553 databytes = 0
2553 databytes = 0
2554 for r in self:
2554 for r in self:
2555 databytes += max(0, self.length(r))
2555 databytes += max(0, self.length(r))
2556 dd = 0
2556 dd = 0
2557 di = actual - len(self) * s - databytes
2557 di = actual - len(self) * s - databytes
2558 except IOError as inst:
2558 except IOError as inst:
2559 if inst.errno != errno.ENOENT:
2559 if inst.errno != errno.ENOENT:
2560 raise
2560 raise
2561 di = 0
2561 di = 0
2562
2562
2563 return (dd, di)
2563 return (dd, di)
2564
2564
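# reviewer note (summary, not from the patch): how a caller reads the
# (dd, di) pair, e.g. verifyintegrity() below:
#
#     dd != 0 -> the data file length disagrees with what the index expects
#     di != 0 -> trailing bytes beyond a whole number of index entries
#     (0, 0)  -> healthy revlog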
2565 def files(self):
2565 def files(self):
2566 res = [self.indexfile]
2566 res = [self.indexfile]
2567 if not self._inline:
2567 if not self._inline:
2568 res.append(self.datafile)
2568 res.append(self.datafile)
2569 return res
2569 return res
2570
2570
2571 def emitrevisions(
2571 def emitrevisions(
2572 self,
2572 self,
2573 nodes,
2573 nodes,
2574 nodesorder=None,
2574 nodesorder=None,
2575 revisiondata=False,
2575 revisiondata=False,
2576 assumehaveparentrevisions=False,
2576 assumehaveparentrevisions=False,
2577 deltamode=repository.CG_DELTAMODE_STD,
2577 deltamode=repository.CG_DELTAMODE_STD,
2578 ):
2578 ):
2579 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2579 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2580 raise error.ProgrammingError(
2580 raise error.ProgrammingError(
2581 b'unhandled value for nodesorder: %s' % nodesorder
2581 b'unhandled value for nodesorder: %s' % nodesorder
2582 )
2582 )
2583
2583
2584 if nodesorder is None and not self._generaldelta:
2584 if nodesorder is None and not self._generaldelta:
2585 nodesorder = b'storage'
2585 nodesorder = b'storage'
2586
2586
2587 if (
2587 if (
2588 not self._storedeltachains
2588 not self._storedeltachains
2589 and deltamode != repository.CG_DELTAMODE_PREV
2589 and deltamode != repository.CG_DELTAMODE_PREV
2590 ):
2590 ):
2591 deltamode = repository.CG_DELTAMODE_FULL
2591 deltamode = repository.CG_DELTAMODE_FULL
2592
2592
2593 return storageutil.emitrevisions(
2593 return storageutil.emitrevisions(
2594 self,
2594 self,
2595 nodes,
2595 nodes,
2596 nodesorder,
2596 nodesorder,
2597 revlogrevisiondelta,
2597 revlogrevisiondelta,
2598 deltaparentfn=self.deltaparent,
2598 deltaparentfn=self.deltaparent,
2599 candeltafn=self.candelta,
2599 candeltafn=self.candelta,
2600 rawsizefn=self.rawsize,
2600 rawsizefn=self.rawsize,
2601 revdifffn=self.revdiff,
2601 revdifffn=self.revdiff,
2602 flagsfn=self.flags,
2602 flagsfn=self.flags,
2603 deltamode=deltamode,
2603 deltamode=deltamode,
2604 revisiondata=revisiondata,
2604 revisiondata=revisiondata,
2605 assumehaveparentrevisions=assumehaveparentrevisions,
2605 assumehaveparentrevisions=assumehaveparentrevisions,
2606 )
2606 )
2607
2607
2608 DELTAREUSEALWAYS = b'always'
2608 DELTAREUSEALWAYS = b'always'
2609 DELTAREUSESAMEREVS = b'samerevs'
2609 DELTAREUSESAMEREVS = b'samerevs'
2610 DELTAREUSENEVER = b'never'
2610 DELTAREUSENEVER = b'never'
2611
2611
2612 DELTAREUSEFULLADD = b'fulladd'
2612 DELTAREUSEFULLADD = b'fulladd'
2613
2613
2614 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2614 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2615
2615
2616 def clone(
2616 def clone(
2617 self,
2617 self,
2618 tr,
2618 tr,
2619 destrevlog,
2619 destrevlog,
2620 addrevisioncb=None,
2620 addrevisioncb=None,
2621 deltareuse=DELTAREUSESAMEREVS,
2621 deltareuse=DELTAREUSESAMEREVS,
2622 forcedeltabothparents=None,
2622 forcedeltabothparents=None,
2623 sidedatacompanion=None,
2623 sidedatacompanion=None,
2624 ):
2624 ):
2625 """Copy this revlog to another, possibly with format changes.
2625 """Copy this revlog to another, possibly with format changes.
2626
2626
2627 The destination revlog will contain the same revisions and nodes.
2627 The destination revlog will contain the same revisions and nodes.
2628 However, it may not be bit-for-bit identical due to e.g. delta encoding
2628 However, it may not be bit-for-bit identical due to e.g. delta encoding
2629 differences.
2629 differences.
2630
2630
2631 The ``deltareuse`` argument controls how deltas from the existing revlog
2631 The ``deltareuse`` argument controls how deltas from the existing revlog
2632 are preserved in the destination revlog. The argument can have the
2632 are preserved in the destination revlog. The argument can have the
2633 following values:
2633 following values:
2634
2634
2635 DELTAREUSEALWAYS
2635 DELTAREUSEALWAYS
2636 Deltas will always be reused (if possible), even if the destination
2636 Deltas will always be reused (if possible), even if the destination
2637 revlog would not select the same revisions for the delta. This is the
2637 revlog would not select the same revisions for the delta. This is the
2638 fastest mode of operation.
2638 fastest mode of operation.
2639 DELTAREUSESAMEREVS
2639 DELTAREUSESAMEREVS
2640 Deltas will be reused if the destination revlog would pick the same
2640 Deltas will be reused if the destination revlog would pick the same
2641 revisions for the delta. This mode strikes a balance between speed
2641 revisions for the delta. This mode strikes a balance between speed
2642 and optimization.
2642 and optimization.
2643 DELTAREUSENEVER
2643 DELTAREUSENEVER
2644 Deltas will never be reused. This is the slowest mode of execution.
2644 Deltas will never be reused. This is the slowest mode of execution.
2645 This mode can be used to recompute deltas (e.g. if the diff/delta
2645 This mode can be used to recompute deltas (e.g. if the diff/delta
2646 algorithm changes).
2646 algorithm changes).
2647 DELTAREUSEFULLADD
2647 DELTAREUSEFULLADD
2648 Revisions will be re-added as if they were new content. This is
2648 Revisions will be re-added as if they were new content. This is
2649 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2649 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2650 e.g. large file detection and handling.
2650 e.g. large file detection and handling.
2651
2651
2652 Delta computation can be slow, so the choice of delta reuse policy can
2652 Delta computation can be slow, so the choice of delta reuse policy can
2653 significantly affect run time.
2653 significantly affect run time.
2654
2654
2655 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2655 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2656 two extremes. Deltas will be reused if they are appropriate. But if the
2656 two extremes. Deltas will be reused if they are appropriate. But if the
2657 delta could choose a better revision, it will do so. This means if you
2657 delta could choose a better revision, it will do so. This means if you
2658 are converting a non-generaldelta revlog to a generaldelta revlog,
2658 are converting a non-generaldelta revlog to a generaldelta revlog,
2659 deltas will be recomputed if the delta's parent isn't a parent of the
2659 deltas will be recomputed if the delta's parent isn't a parent of the
2660 revision.
2660 revision.
2661
2661
2662 In addition to the delta policy, the ``forcedeltabothparents``
2662 In addition to the delta policy, the ``forcedeltabothparents``
2663 argument controls whether to force compute deltas against both parents
2663 argument controls whether to force compute deltas against both parents
2664 for merges. If None, the destination revlog's existing setting is kept.
2664 for merges. If None, the destination revlog's existing setting is kept.
2665
2665
2666 If not None, the `sidedatacompanion` is a callable that accepts two
2666 If not None, the `sidedatacompanion` is a callable that accepts two
2667 arguments:
2667 arguments:
2668
2668
2669 (srcrevlog, rev)
2669 (srcrevlog, rev)
2670
2670
2671 and returns a triplet that controls changes to sidedata content from the
2671 and returns a triplet that controls changes to sidedata content from the
2672 old revision to the new clone result:
2672 old revision to the new clone result:
2673
2673
2674 (dropall, filterout, update)
2674 (dropall, filterout, update)
2675
2675
2676 * if `dropall` is True, all sidedata should be dropped
2676 * if `dropall` is True, all sidedata should be dropped
2677 * `filterout` is a set of sidedata keys that should be dropped
2677 * `filterout` is a set of sidedata keys that should be dropped
2678 * `update` is a mapping of additional/new key -> value
2678 * `update` is a mapping of additional/new key -> value
2679 """
2679 """
2680 if deltareuse not in self.DELTAREUSEALL:
2680 if deltareuse not in self.DELTAREUSEALL:
2681 raise ValueError(
2681 raise ValueError(
2682 _(b'value for deltareuse invalid: %s') % deltareuse
2682 _(b'value for deltareuse invalid: %s') % deltareuse
2683 )
2683 )
2684
2684
2685 if len(destrevlog):
2685 if len(destrevlog):
2686 raise ValueError(_(b'destination revlog is not empty'))
2686 raise ValueError(_(b'destination revlog is not empty'))
2687
2687
2688 if getattr(self, 'filteredrevs', None):
2688 if getattr(self, 'filteredrevs', None):
2689 raise ValueError(_(b'source revlog has filtered revisions'))
2689 raise ValueError(_(b'source revlog has filtered revisions'))
2690 if getattr(destrevlog, 'filteredrevs', None):
2690 if getattr(destrevlog, 'filteredrevs', None):
2691 raise ValueError(_(b'destination revlog has filtered revisions'))
2691 raise ValueError(_(b'destination revlog has filtered revisions'))
2692
2692
2693 # lazydelta and lazydeltabase control whether to reuse a cached delta,
2693 # lazydelta and lazydeltabase control whether to reuse a cached delta,
2694 # if possible.
2694 # if possible.
2695 oldlazydelta = destrevlog._lazydelta
2695 oldlazydelta = destrevlog._lazydelta
2696 oldlazydeltabase = destrevlog._lazydeltabase
2696 oldlazydeltabase = destrevlog._lazydeltabase
2697 oldamd = destrevlog._deltabothparents
2697 oldamd = destrevlog._deltabothparents
2698
2698
2699 try:
2699 try:
2700 if deltareuse == self.DELTAREUSEALWAYS:
2700 if deltareuse == self.DELTAREUSEALWAYS:
2701 destrevlog._lazydeltabase = True
2701 destrevlog._lazydeltabase = True
2702 destrevlog._lazydelta = True
2702 destrevlog._lazydelta = True
2703 elif deltareuse == self.DELTAREUSESAMEREVS:
2703 elif deltareuse == self.DELTAREUSESAMEREVS:
2704 destrevlog._lazydeltabase = False
2704 destrevlog._lazydeltabase = False
2705 destrevlog._lazydelta = True
2705 destrevlog._lazydelta = True
2706 elif deltareuse == self.DELTAREUSENEVER:
2706 elif deltareuse == self.DELTAREUSENEVER:
2707 destrevlog._lazydeltabase = False
2707 destrevlog._lazydeltabase = False
2708 destrevlog._lazydelta = False
2708 destrevlog._lazydelta = False
2709
2709
2710 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2710 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2711
2711
2712 self._clone(
2712 self._clone(
2713 tr,
2713 tr,
2714 destrevlog,
2714 destrevlog,
2715 addrevisioncb,
2715 addrevisioncb,
2716 deltareuse,
2716 deltareuse,
2717 forcedeltabothparents,
2717 forcedeltabothparents,
2718 sidedatacompanion,
2718 sidedatacompanion,
2719 )
2719 )
2720
2720
2721 finally:
2721 finally:
2722 destrevlog._lazydelta = oldlazydelta
2722 destrevlog._lazydelta = oldlazydelta
2723 destrevlog._lazydeltabase = oldlazydeltabase
2723 destrevlog._lazydeltabase = oldlazydeltabase
2724 destrevlog._deltabothparents = oldamd
2724 destrevlog._deltabothparents = oldamd
2725
2725
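# reviewer note (illustrative sketch, not from the patch): driving clone()
# from an upgrade-style operation, with `src`, `dst` and `tr` assumed to
# exist already:
#
#     src.clone(tr, dst, deltareuse=src.DELTAREUSESAMEREVS)
#
# dst must be empty and neither revlog may have filtered revisions, or a
# ValueError is raised before any copying starts.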
2726 def _clone(
2726 def _clone(
2727 self,
2727 self,
2728 tr,
2728 tr,
2729 destrevlog,
2729 destrevlog,
2730 addrevisioncb,
2730 addrevisioncb,
2731 deltareuse,
2731 deltareuse,
2732 forcedeltabothparents,
2732 forcedeltabothparents,
2733 sidedatacompanion,
2733 sidedatacompanion,
2734 ):
2734 ):
2735 """perform the core duty of `revlog.clone` after parameter processing"""
2735 """perform the core duty of `revlog.clone` after parameter processing"""
2736 deltacomputer = deltautil.deltacomputer(destrevlog)
2736 deltacomputer = deltautil.deltacomputer(destrevlog)
2737 index = self.index
2737 index = self.index
2738 for rev in self:
2738 for rev in self:
2739 entry = index[rev]
2739 entry = index[rev]
2740
2740
2741 # Some classes override linkrev to take filtered revs into
2741 # Some classes override linkrev to take filtered revs into
2742 # account. Use raw entry from index.
2742 # account. Use raw entry from index.
2743 flags = entry[0] & 0xFFFF
2743 flags = entry[0] & 0xFFFF
2744 linkrev = entry[4]
2744 linkrev = entry[4]
2745 p1 = index[entry[5]][7]
2745 p1 = index[entry[5]][7]
2746 p2 = index[entry[6]][7]
2746 p2 = index[entry[6]][7]
2747 node = entry[7]
2747 node = entry[7]
2748
2748
2749 sidedataactions = (False, [], {})
2749 sidedataactions = (False, [], {})
2750 if sidedatacompanion is not None:
2750 if sidedatacompanion is not None:
2751 sidedataactions = sidedatacompanion(self, rev)
2751 sidedataactions = sidedatacompanion(self, rev)
2752
2752
2753 # (Possibly) reuse the delta from the revlog if allowed and
2753 # (Possibly) reuse the delta from the revlog if allowed and
2754 # the revlog chunk is a delta.
2754 # the revlog chunk is a delta.
2755 cachedelta = None
2755 cachedelta = None
2756 rawtext = None
2756 rawtext = None
2757 if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
2757 if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
2758 dropall, filterout, update = sidedataactions
2758 dropall, filterout, update = sidedataactions
2759 text, sidedata = self._revisiondata(rev)
2759 text, sidedata = self._revisiondata(rev)
2760 if dropall:
2760 if dropall:
2761 sidedata = {}
2761 sidedata = {}
2762 for key in filterout:
2762 for key in filterout:
2763 sidedata.pop(key, None)
2763 sidedata.pop(key, None)
2764 sidedata.update(update)
2764 sidedata.update(update)
2765 if not sidedata:
2765 if not sidedata:
2766 sidedata = None
2766 sidedata = None
2767 destrevlog.addrevision(
2767 destrevlog.addrevision(
2768 text,
2768 text,
2769 tr,
2769 tr,
2770 linkrev,
2770 linkrev,
2771 p1,
2771 p1,
2772 p2,
2772 p2,
2773 cachedelta=cachedelta,
2773 cachedelta=cachedelta,
2774 node=node,
2774 node=node,
2775 flags=flags,
2775 flags=flags,
2776 deltacomputer=deltacomputer,
2776 deltacomputer=deltacomputer,
2777 sidedata=sidedata,
2777 sidedata=sidedata,
2778 )
2778 )
2779 else:
2779 else:
2780 if destrevlog._lazydelta:
2780 if destrevlog._lazydelta:
2781 dp = self.deltaparent(rev)
2781 dp = self.deltaparent(rev)
2782 if dp != nullrev:
2782 if dp != nullrev:
2783 cachedelta = (dp, bytes(self._chunk(rev)))
2783 cachedelta = (dp, bytes(self._chunk(rev)))
2784
2784
2785 if not cachedelta:
2785 if not cachedelta:
2786 rawtext = self.rawdata(rev)
2786 rawtext = self.rawdata(rev)
2787
2787
2788 ifh = destrevlog.opener(
2788 ifh = destrevlog.opener(
2789 destrevlog.indexfile, b'a+', checkambig=False
2789 destrevlog.indexfile, b'a+', checkambig=False
2790 )
2790 )
2791 dfh = None
2791 dfh = None
2792 if not destrevlog._inline:
2792 if not destrevlog._inline:
2793 dfh = destrevlog.opener(destrevlog.datafile, b'a+')
2793 dfh = destrevlog.opener(destrevlog.datafile, b'a+')
2794 try:
2794 try:
2795 destrevlog._addrevision(
2795 destrevlog._addrevision(
2796 node,
2796 node,
2797 rawtext,
2797 rawtext,
2798 tr,
2798 tr,
2799 linkrev,
2799 linkrev,
2800 p1,
2800 p1,
2801 p2,
2801 p2,
2802 flags,
2802 flags,
2803 cachedelta,
2803 cachedelta,
2804 ifh,
2804 ifh,
2805 dfh,
2805 dfh,
2806 deltacomputer=deltacomputer,
2806 deltacomputer=deltacomputer,
2807 )
2807 )
2808 finally:
2808 finally:
2809 if dfh:
2809 if dfh:
2810 dfh.close()
2810 dfh.close()
2811 ifh.close()
2811 ifh.close()
2812
2812
2813 if addrevisioncb:
2813 if addrevisioncb:
2814 addrevisioncb(self, rev, node)
2814 addrevisioncb(self, rev, node)
2815
2815
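# reviewer note (illustrative, not from the patch): a sidedatacompanion
# obeying the (dropall, filterout, update) contract documented in clone();
# the keys and value below are invented:
#
#     def companion(srcrevlog, rev):
#         return (False, {b'old-key'}, {b'new-key': b'some value'})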
2816 def censorrevision(self, tr, censornode, tombstone=b''):
2816 def censorrevision(self, tr, censornode, tombstone=b''):
2817 if (self.version & 0xFFFF) == REVLOGV0:
2817 if (self.version & 0xFFFF) == REVLOGV0:
2818 raise error.RevlogError(
2818 raise error.RevlogError(
2819 _(b'cannot censor with version %d revlogs') % self.version
2819 _(b'cannot censor with version %d revlogs') % self.version
2820 )
2820 )
2821
2821
2822 censorrev = self.rev(censornode)
2822 censorrev = self.rev(censornode)
2823 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2823 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2824
2824
2825 if len(tombstone) > self.rawsize(censorrev):
2825 if len(tombstone) > self.rawsize(censorrev):
2826 raise error.Abort(
2826 raise error.Abort(
2827 _(b'censor tombstone must be no longer than censored data')
2827 _(b'censor tombstone must be no longer than censored data')
2828 )
2828 )
2829
2829
2830 # Rewriting the revlog in place is hard. Our strategy for censoring is
2830 # Rewriting the revlog in place is hard. Our strategy for censoring is
2831 # to create a new revlog, copy all revisions to it, then replace the
2831 # to create a new revlog, copy all revisions to it, then replace the
2832 # revlogs on transaction close.
2832 # revlogs on transaction close.
2833
2833
2834 newindexfile = self.indexfile + b'.tmpcensored'
2834 newindexfile = self.indexfile + b'.tmpcensored'
2835 newdatafile = self.datafile + b'.tmpcensored'
2835 newdatafile = self.datafile + b'.tmpcensored'
2836
2836
2837 # This is a bit dangerous. We could easily have a mismatch of state.
2837 # This is a bit dangerous. We could easily have a mismatch of state.
2838 newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
2838 newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
2839 newrl.version = self.version
2839 newrl.version = self.version
2840 newrl._generaldelta = self._generaldelta
2840 newrl._generaldelta = self._generaldelta
2841 newrl._io = self._io
2841 newrl._io = self._io
2842
2842
2843 for rev in self.revs():
2843 for rev in self.revs():
2844 node = self.node(rev)
2844 node = self.node(rev)
2845 p1, p2 = self.parents(node)
2845 p1, p2 = self.parents(node)
2846
2846
2847 if rev == censorrev:
2847 if rev == censorrev:
2848 newrl.addrawrevision(
2848 newrl.addrawrevision(
2849 tombstone,
2849 tombstone,
2850 tr,
2850 tr,
2851 self.linkrev(censorrev),
2851 self.linkrev(censorrev),
2852 p1,
2852 p1,
2853 p2,
2853 p2,
2854 censornode,
2854 censornode,
2855 REVIDX_ISCENSORED,
2855 REVIDX_ISCENSORED,
2856 )
2856 )
2857
2857
2858 if newrl.deltaparent(rev) != nullrev:
2858 if newrl.deltaparent(rev) != nullrev:
2859 raise error.Abort(
2859 raise error.Abort(
2860 _(
2860 _(
2861 b'censored revision stored as delta; '
2861 b'censored revision stored as delta; '
2862 b'cannot censor'
2862 b'cannot censor'
2863 ),
2863 ),
2864 hint=_(
2864 hint=_(
2865 b'censoring of revlogs is not '
2865 b'censoring of revlogs is not '
2866 b'fully implemented; please report '
2866 b'fully implemented; please report '
2867 b'this bug'
2867 b'this bug'
2868 ),
2868 ),
2869 )
2869 )
2870 continue
2870 continue
2871
2871
2872 if self.iscensored(rev):
2872 if self.iscensored(rev):
2873 if self.deltaparent(rev) != nullrev:
2873 if self.deltaparent(rev) != nullrev:
2874 raise error.Abort(
2874 raise error.Abort(
2875 _(
2875 _(
2876 b'cannot censor due to censored '
2876 b'cannot censor due to censored '
2877 b'revision having delta stored'
2877 b'revision having delta stored'
2878 )
2878 )
2879 )
2879 )
2880 rawtext = self._chunk(rev)
2880 rawtext = self._chunk(rev)
2881 else:
2881 else:
2882 rawtext = self.rawdata(rev)
2882 rawtext = self.rawdata(rev)
2883
2883
2884 newrl.addrawrevision(
2884 newrl.addrawrevision(
2885 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
2885 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
2886 )
2886 )
2887
2887
2888 tr.addbackup(self.indexfile, location=b'store')
2888 tr.addbackup(self.indexfile, location=b'store')
2889 if not self._inline:
2889 if not self._inline:
2890 tr.addbackup(self.datafile, location=b'store')
2890 tr.addbackup(self.datafile, location=b'store')
2891
2891
2892 self.opener.rename(newrl.indexfile, self.indexfile)
2892 self.opener.rename(newrl.indexfile, self.indexfile)
2893 if not self._inline:
2893 if not self._inline:
2894 self.opener.rename(newrl.datafile, self.datafile)
2894 self.opener.rename(newrl.datafile, self.datafile)
2895
2895
2896 self.clearcaches()
2896 self.clearcaches()
2897 self._loadindex()
2897 self._loadindex()
2898
2898
2899 def verifyintegrity(self, state):
2899 def verifyintegrity(self, state):
2900 """Verifies the integrity of the revlog.
2900 """Verifies the integrity of the revlog.
2901
2901
2902 Yields ``revlogproblem`` instances describing problems that are
2902 Yields ``revlogproblem`` instances describing problems that are
2903 found.
2903 found.
2904 """
2904 """
2905 dd, di = self.checksize()
2905 dd, di = self.checksize()
2906 if dd:
2906 if dd:
2907 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
2907 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
2908 if di:
2908 if di:
2909 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
2909 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
2910
2910
2911 version = self.version & 0xFFFF
2911 version = self.version & 0xFFFF
2912
2912
2913 # The verifier tells us what version revlog we should be.
2913 # The verifier tells us what version revlog we should be.
2914 if version != state[b'expectedversion']:
2914 if version != state[b'expectedversion']:
2915 yield revlogproblem(
2915 yield revlogproblem(
2916 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
2916 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
2917 % (self.indexfile, version, state[b'expectedversion'])
2917 % (self.indexfile, version, state[b'expectedversion'])
2918 )
2918 )
2919
2919
2920 state[b'skipread'] = set()
2920 state[b'skipread'] = set()
2921 state[b'safe_renamed'] = set()
2921 state[b'safe_renamed'] = set()
2922
2922
2923 for rev in self:
2923 for rev in self:
2924 node = self.node(rev)
2924 node = self.node(rev)
2925
2925
2926 # Verify contents. 4 cases to care about:
2926 # Verify contents. 4 cases to care about:
2927 #
2927 #
2928 # common: the most common case
2928 # common: the most common case
2929 # rename: with a rename
2929 # rename: with a rename
2930 # meta: file content starts with b'\1\n', the metadata
2930 # meta: file content starts with b'\1\n', the metadata
2931 # header defined in filelog.py, but without a rename
2931 # header defined in filelog.py, but without a rename
2932 # ext: content stored externally
2932 # ext: content stored externally
2933 #
2933 #
2934 # More formally, their differences are shown below:
2934 # More formally, their differences are shown below:
2935 #
2935 #
2936 # | common | rename | meta | ext
2936 # | common | rename | meta | ext
2937 # -------------------------------------------------------
2937 # -------------------------------------------------------
2938 # flags() | 0 | 0 | 0 | not 0
2938 # flags() | 0 | 0 | 0 | not 0
2939 # renamed() | False | True | False | ?
2939 # renamed() | False | True | False | ?
2940 # rawtext[0:2]=='\1\n'| False | True | True | ?
2940 # rawtext[0:2]=='\1\n'| False | True | True | ?
2941 #
2941 #
2942 # "rawtext" means the raw text stored in revlog data, which
2942 # "rawtext" means the raw text stored in revlog data, which
2943 # could be retrieved by "rawdata(rev)". "text"
2943 # could be retrieved by "rawdata(rev)". "text"
2944 # mentioned below is "revision(rev)".
2944 # mentioned below is "revision(rev)".
2945 #
2945 #
2946 # There are 3 different lengths stored physically:
2946 # There are 3 different lengths stored physically:
2947 # 1. L1: rawsize, stored in revlog index
2947 # 1. L1: rawsize, stored in revlog index
2948 # 2. L2: len(rawtext), stored in revlog data
2948 # 2. L2: len(rawtext), stored in revlog data
2949 # 3. L3: len(text), stored in revlog data if flags==0, or
2949 # 3. L3: len(text), stored in revlog data if flags==0, or
2950 # possibly somewhere else if flags!=0
2950 # possibly somewhere else if flags!=0
2951 #
2951 #
2952 # L1 should be equal to L2. L3 could be different from them.
2952 # L1 should be equal to L2. L3 could be different from them.
2953 # "text" may or may not affect commit hash depending on flag
2953 # "text" may or may not affect commit hash depending on flag
2954 # processors (see flagutil.addflagprocessor).
2954 # processors (see flagutil.addflagprocessor).
2955 #
2955 #
2956 # | common | rename | meta | ext
2956 # | common | rename | meta | ext
2957 # -------------------------------------------------
2957 # -------------------------------------------------
2958 # rawsize() | L1 | L1 | L1 | L1
2958 # rawsize() | L1 | L1 | L1 | L1
2959 # size() | L1 | L2-LM | L1(*) | L1 (?)
2959 # size() | L1 | L2-LM | L1(*) | L1 (?)
2960 # len(rawtext) | L2 | L2 | L2 | L2
2960 # len(rawtext) | L2 | L2 | L2 | L2
2961 # len(text) | L2 | L2 | L2 | L3
2961 # len(text) | L2 | L2 | L2 | L3
2962 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2962 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2963 #
2963 #
2964 # LM: length of metadata, depending on rawtext
2964 # LM: length of metadata, depending on rawtext
2965 # (*): not ideal, see comment in filelog.size
2965 # (*): not ideal, see comment in filelog.size
2966 # (?): could be "- len(meta)" if the resolved content has
2966 # (?): could be "- len(meta)" if the resolved content has
2967 # rename metadata
2967 # rename metadata
2968 #
2968 #
2969 # Checks needed to be done:
2969 # Checks needed to be done:
2970 # 1. length check: L1 == L2, in all cases.
2970 # 1. length check: L1 == L2, in all cases.
2971 # 2. hash check: depending on flag processor, we may need to
2971 # 2. hash check: depending on flag processor, we may need to
2972 # use either "text" (external), or "rawtext" (in revlog).
2972 # use either "text" (external), or "rawtext" (in revlog).
2973
2973
2974 try:
2974 try:
2975 skipflags = state.get(b'skipflags', 0)
2975 skipflags = state.get(b'skipflags', 0)
2976 if skipflags:
2976 if skipflags:
2977 skipflags &= self.flags(rev)
2977 skipflags &= self.flags(rev)
2978
2978
2979 _verify_revision(self, skipflags, state, node)
2979 _verify_revision(self, skipflags, state, node)
2980
2980
2981 l1 = self.rawsize(rev)
2981 l1 = self.rawsize(rev)
2982 l2 = len(self.rawdata(node))
2982 l2 = len(self.rawdata(node))
2983
2983
2984 if l1 != l2:
2984 if l1 != l2:
2985 yield revlogproblem(
2985 yield revlogproblem(
2986 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
2986 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
2987 node=node,
2987 node=node,
2988 )
2988 )
2989
2989
2990 except error.CensoredNodeError:
2990 except error.CensoredNodeError:
2991 if state[b'erroroncensored']:
2991 if state[b'erroroncensored']:
2992 yield revlogproblem(
2992 yield revlogproblem(
2993 error=_(b'censored file data'), node=node
2993 error=_(b'censored file data'), node=node
2994 )
2994 )
2995 state[b'skipread'].add(node)
2995 state[b'skipread'].add(node)
2996 except Exception as e:
2996 except Exception as e:
2997 yield revlogproblem(
2997 yield revlogproblem(
2998 error=_(b'unpacking %s: %s')
2998 error=_(b'unpacking %s: %s')
2999 % (short(node), stringutil.forcebytestr(e)),
2999 % (short(node), stringutil.forcebytestr(e)),
3000 node=node,
3000 node=node,
3001 )
3001 )
3002 state[b'skipread'].add(node)
3002 state[b'skipread'].add(node)
3003
3003
3004 def storageinfo(
3004 def storageinfo(
3005 self,
3005 self,
3006 exclusivefiles=False,
3006 exclusivefiles=False,
3007 sharedfiles=False,
3007 sharedfiles=False,
3008 revisionscount=False,
3008 revisionscount=False,
3009 trackedsize=False,
3009 trackedsize=False,
3010 storedsize=False,
3010 storedsize=False,
3011 ):
3011 ):
3012 d = {}
3012 d = {}
3013
3013
3014 if exclusivefiles:
3014 if exclusivefiles:
3015 d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
3015 d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
3016 if not self._inline:
3016 if not self._inline:
3017 d[b'exclusivefiles'].append((self.opener, self.datafile))
3017 d[b'exclusivefiles'].append((self.opener, self.datafile))
3018
3018
3019 if sharedfiles:
3019 if sharedfiles:
3020 d[b'sharedfiles'] = []
3020 d[b'sharedfiles'] = []
3021
3021
3022 if revisionscount:
3022 if revisionscount:
3023 d[b'revisionscount'] = len(self)
3023 d[b'revisionscount'] = len(self)
3024
3024
3025 if trackedsize:
3025 if trackedsize:
3026 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3026 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3027
3027
3028 if storedsize:
3028 if storedsize:
3029 d[b'storedsize'] = sum(
3029 d[b'storedsize'] = sum(
3030 self.opener.stat(path).st_size for path in self.files()
3030 self.opener.stat(path).st_size for path in self.files()
3031 )
3031 )
3032
3032
3033 return d
3033 return d
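# reviewer note (illustrative, not from the patch): with every flag set,
# storageinfo() returns a dict shaped roughly like this, values invented:
#
#     {b'exclusivefiles': [(opener, b'data/foo.i')],
#      b'sharedfiles': [],
#      b'revisionscount': 42,
#      b'trackedsize': 123456,
#      b'storedsize': 65432}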
@@ -1,456 +1,463 b''
1 # nodemap.py - nodemap related code and utilities
1 # nodemap.py - nodemap related code and utilities
2 #
2 #
3 # Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
4 # Copyright 2019 George Racinet <georges.racinet@octobus.net>
4 # Copyright 2019 George Racinet <georges.racinet@octobus.net>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import os
11 import os
12 import re
12 import re
13 import struct
13 import struct
14
14
15 from .. import (
15 from .. import (
16 error,
16 error,
17 node as nodemod,
17 node as nodemod,
18 util,
18 util,
19 )
19 )
20
20
21
21
22 class NodeMap(dict):
22 class NodeMap(dict):
23 def __missing__(self, x):
23 def __missing__(self, x):
24 raise error.RevlogError(b'unknown node: %s' % x)
24 raise error.RevlogError(b'unknown node: %s' % x)
25
25
26
26
27 def persisted_data(revlog):
27 def persisted_data(revlog):
28 """read the nodemap for a revlog from disk"""
28 """read the nodemap for a revlog from disk"""
29 if revlog.nodemap_file is None:
29 if revlog.nodemap_file is None:
30 return None
30 return None
31 pdata = revlog.opener.tryread(revlog.nodemap_file)
31 pdata = revlog.opener.tryread(revlog.nodemap_file)
32 if not pdata:
32 if not pdata:
33 return None
33 return None
34 offset = 0
34 offset = 0
35 (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size])
35 (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size])
36 if version != ONDISK_VERSION:
36 if version != ONDISK_VERSION:
37 return None
37 return None
38 offset += S_VERSION.size
38 offset += S_VERSION.size
39 (uid_size,) = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
39 headers = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
40 uid_size, tip_rev = headers
40 offset += S_HEADER.size
41 offset += S_HEADER.size
41 docket = NodeMapDocket(pdata[offset : offset + uid_size])
42 docket = NodeMapDocket(pdata[offset : offset + uid_size])
43 docket.tip_rev = tip_rev
42
44
43 filename = _rawdata_filepath(revlog, docket)
45 filename = _rawdata_filepath(revlog, docket)
44 return docket, revlog.opener.tryread(filename)
46 return docket, revlog.opener.tryread(filename)
45
47
46
48
47 def setup_persistent_nodemap(tr, revlog):
49 def setup_persistent_nodemap(tr, revlog):
48 """Install whatever is needed transaction side to persist a nodemap on disk
50 """Install whatever is needed transaction side to persist a nodemap on disk
49
51
50 (only actually persist the nodemap if this is relevant for this revlog)
52 (only actually persist the nodemap if this is relevant for this revlog)
51 """
53 """
52 if revlog._inline:
54 if revlog._inline:
53 return # inlined revlogs are too small for this to be relevant
55 return # inlined revlogs are too small for this to be relevant
54 if revlog.nodemap_file is None:
56 if revlog.nodemap_file is None:
55 return # we do not use persistent_nodemap on this revlog
57 return # we do not use persistent_nodemap on this revlog
56 callback_id = b"revlog-persistent-nodemap-%s" % revlog.nodemap_file
58 callback_id = b"revlog-persistent-nodemap-%s" % revlog.nodemap_file
57 if tr.hasfinalize(callback_id):
59 if tr.hasfinalize(callback_id):
58 return # no need to register again
60 return # no need to register again
59 tr.addfinalize(callback_id, lambda tr: _persist_nodemap(tr, revlog))
61 tr.addfinalize(callback_id, lambda tr: _persist_nodemap(tr, revlog))
60
62
61
63
62 def _persist_nodemap(tr, revlog):
64 def _persist_nodemap(tr, revlog):
63 """Write nodemap data on disk for a given revlog
65 """Write nodemap data on disk for a given revlog
64 """
66 """
65 if getattr(revlog, 'filteredrevs', ()):
67 if getattr(revlog, 'filteredrevs', ()):
66 raise error.ProgrammingError(
68 raise error.ProgrammingError(
67 "cannot persist nodemap of a filtered changelog"
69 "cannot persist nodemap of a filtered changelog"
68 )
70 )
69 if revlog.nodemap_file is None:
71 if revlog.nodemap_file is None:
70 msg = "calling persist nodemap on a revlog without the feature enabled"
72 msg = "calling persist nodemap on a revlog without the feature enabled"
71 raise error.ProgrammingError(msg)
73 raise error.ProgrammingError(msg)
72
74
73 can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
75 can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
74 ondisk_docket = revlog._nodemap_docket
76 ondisk_docket = revlog._nodemap_docket
75
77
76 # first attempt an incremental update of the data
78 # first attempt an incremental update of the data
77 if can_incremental and ondisk_docket is not None:
79 if can_incremental and ondisk_docket is not None:
78 target_docket = revlog._nodemap_docket.copy()
80 target_docket = revlog._nodemap_docket.copy()
79 data = revlog.index.nodemap_data_incremental()
81 data = revlog.index.nodemap_data_incremental()
80 datafile = _rawdata_filepath(revlog, target_docket)
82 datafile = _rawdata_filepath(revlog, target_docket)
81 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
83 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
82 # store vfs
84 # store vfs
83 with revlog.opener(datafile, b'a') as fd:
85 with revlog.opener(datafile, b'a') as fd:
84 fd.write(data)
86 fd.write(data)
85 else:
87 else:
86 # otherwise fallback to a full new export
88 # otherwise fallback to a full new export
87 target_docket = NodeMapDocket()
89 target_docket = NodeMapDocket()
88 datafile = _rawdata_filepath(revlog, target_docket)
90 datafile = _rawdata_filepath(revlog, target_docket)
89 if util.safehasattr(revlog.index, "nodemap_data_all"):
91 if util.safehasattr(revlog.index, "nodemap_data_all"):
90 data = revlog.index.nodemap_data_all()
92 data = revlog.index.nodemap_data_all()
91 else:
93 else:
92 data = persistent_data(revlog.index)
94 data = persistent_data(revlog.index)
93 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
95 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
94 # store vfs
96 # store vfs
95 with revlog.opener(datafile, b'w') as fd:
97 with revlog.opener(datafile, b'w') as fd:
96 fd.write(data)
98 fd.write(data)
99 target_docket.tip_rev = revlog.tiprev()
97 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
100 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
98 # store vfs
101 # store vfs
99 with revlog.opener(revlog.nodemap_file, b'w', atomictemp=True) as fp:
102 with revlog.opener(revlog.nodemap_file, b'w', atomictemp=True) as fp:
100 fp.write(target_docket.serialize())
103 fp.write(target_docket.serialize())
101 revlog._nodemap_docket = target_docket
104 revlog._nodemap_docket = target_docket
102 # EXP-TODO: if the transaction aborts, we should remove the new data and
105 # EXP-TODO: if the transaction aborts, we should remove the new data and
103 # reinstall the old one.
106 # reinstall the old one.
104
107
105 # search for old index files in all cases; some older process might have
108 # search for old index files in all cases; some older process might have
106 # left one behind.
109 # left one behind.
107 olds = _other_rawdata_filepath(revlog, target_docket)
110 olds = _other_rawdata_filepath(revlog, target_docket)
108 if olds:
111 if olds:
109 realvfs = getattr(revlog, '_realopener', revlog.opener)
112 realvfs = getattr(revlog, '_realopener', revlog.opener)
110
113
111 def cleanup(tr):
114 def cleanup(tr):
112 for oldfile in olds:
115 for oldfile in olds:
113 realvfs.tryunlink(oldfile)
116 realvfs.tryunlink(oldfile)
114
117
115 callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file
118 callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file
116 tr.addpostclose(callback_id, cleanup)
119 tr.addpostclose(callback_id, cleanup)
117
120
118
121
119 ### Nodemap docket file
122 ### Nodemap docket file
120 #
123 #
121 # The nodemap data are stored on disk using 2 files:
124 # The nodemap data are stored on disk using 2 files:
122 #
125 #
123 # * a raw data file containing the persistent nodemap
126 # * a raw data file containing the persistent nodemap
124 # (see `Nodemap Trie` section)
127 # (see `Nodemap Trie` section)
125 #
128 #
126 # * a small "docket" file containing metadata
129 # * a small "docket" file containing metadata
127 #
130 #
128 # While the nodemap data can be multiple tens of megabytes, the "docket" is
131 # While the nodemap data can be multiple tens of megabytes, the "docket" is
129 # small, so it is easy to update automatically or to duplicate its content
132 # small, so it is easy to update automatically or to duplicate its content
130 # during a transaction.
133 # during a transaction.
131 #
134 #
132 # Multiple raw data files can exist at the same time (the currently valid one
135 # Multiple raw data files can exist at the same time (the currently valid one
133 # and a new one being used by an in-progress transaction). To accommodate this,
136 # and a new one being used by an in-progress transaction). To accommodate this,
134 # the filename hosting the raw data has a variable part. The exact filename is
137 # the filename hosting the raw data has a variable part. The exact filename is
135 # specified inside the "docket" file.
138 # specified inside the "docket" file.
136 #
139 #
137 # The docket file contains information to find, qualify and validate the raw
140 # The docket file contains information to find, qualify and validate the raw
138 # data. Its content is currently very light, but it will expand as the on disk
141 # data. Its content is currently very light, but it will expand as the on disk
139 # nodemap gains the necessary features to be used in production.
142 # nodemap gains the necessary features to be used in production.
140
143
141 # version 0 is experimental, no BC guarantee, do not use outside of tests.
144 # version 0 is experimental, no BC guarantee, do not use outside of tests.
142 ONDISK_VERSION = 0
145 ONDISK_VERSION = 0
143
146
144 S_VERSION = struct.Struct(">B")
147 S_VERSION = struct.Struct(">B")
145 S_HEADER = struct.Struct(">B")
148 S_HEADER = struct.Struct(">BQ")
146
149
147 ID_SIZE = 8
150 ID_SIZE = 8
148
151
149
152
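
To make the updated docket layout concrete, here is a small self-contained sketch of the byte format implied by the structs above; the uid value is hypothetical, and the 26-byte total matches the test expectations later in this diff:

import struct

S_VERSION = struct.Struct(">B")   # one version byte
S_HEADER = struct.Struct(">BQ")   # uid length (B) + tip revision (Q), big endian

uid = b"a1b2c3d4e5f60718"  # hypothetical 16-character uid, as _make_uid produces
blob = S_VERSION.pack(0) + S_HEADER.pack(len(uid), 5000) + uid
assert len(blob) == 1 + 9 + 16  # 26 bytes total
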
150 def _make_uid():
153 def _make_uid():
151 """return a new unique identifier.
154 """return a new unique identifier.
152
155
153 The identifier is random and composed of ascii characters."""
156 The identifier is random and composed of ascii characters."""
154 return nodemod.hex(os.urandom(ID_SIZE))
157 return nodemod.hex(os.urandom(ID_SIZE))
155
158
156
159
157 class NodeMapDocket(object):
160 class NodeMapDocket(object):
158 """metadata associated with persistent nodemap data
161 """metadata associated with persistent nodemap data
159
162
160 The persistent data may come from disk or be on their way to disk.
163 The persistent data may come from disk or be on their way to disk.
161 """
164 """
162
165
163 def __init__(self, uid=None):
166 def __init__(self, uid=None):
164 if uid is None:
167 if uid is None:
165 uid = _make_uid()
168 uid = _make_uid()
166 self.uid = uid
169 self.uid = uid
170 self.tip_rev = None
167
171
168 def copy(self):
172 def copy(self):
169 return NodeMapDocket(uid=self.uid)
173 new = NodeMapDocket(uid=self.uid)
174 new.tip_rev = self.tip_rev
175 return new
170
176
171 def serialize(self):
177 def serialize(self):
172 """return serialized bytes for a docket using the passed uid"""
178 """return serialized bytes for a docket using the passed uid"""
173 data = []
179 data = []
174 data.append(S_VERSION.pack(ONDISK_VERSION))
180 data.append(S_VERSION.pack(ONDISK_VERSION))
175 data.append(S_HEADER.pack(len(self.uid)))
181 headers = (len(self.uid), self.tip_rev)
182 data.append(S_HEADER.pack(*headers))
176 data.append(self.uid)
183 data.append(self.uid)
177 return b''.join(data)
184 return b''.join(data)
178
185
179
186
180 def _rawdata_filepath(revlog, docket):
187 def _rawdata_filepath(revlog, docket):
181 """The (vfs relative) nodemap's rawdata file for a given uid"""
188 """The (vfs relative) nodemap's rawdata file for a given uid"""
182 prefix = revlog.nodemap_file[:-2]
189 prefix = revlog.nodemap_file[:-2]
183 return b"%s-%s.nd" % (prefix, docket.uid)
190 return b"%s-%s.nd" % (prefix, docket.uid)
184
191
185
192
186 def _other_rawdata_filepath(revlog, docket):
193 def _other_rawdata_filepath(revlog, docket):
187 prefix = revlog.nodemap_file[:-2]
194 prefix = revlog.nodemap_file[:-2]
188 pattern = re.compile(b"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
195 pattern = re.compile(b"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
189 new_file_path = _rawdata_filepath(revlog, docket)
196 new_file_path = _rawdata_filepath(revlog, docket)
190 new_file_name = revlog.opener.basename(new_file_path)
197 new_file_name = revlog.opener.basename(new_file_path)
191 dirpath = revlog.opener.dirname(new_file_path)
198 dirpath = revlog.opener.dirname(new_file_path)
192 others = []
199 others = []
193 for f in revlog.opener.listdir(dirpath):
200 for f in revlog.opener.listdir(dirpath):
194 if pattern.match(f) and f != new_file_name:
201 if pattern.match(f) and f != new_file_name:
195 others.append(f)
202 others.append(f)
196 return others
203 return others
197
204
198
205
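
For illustration, a minimal sketch of the naming scheme implemented by the two helpers above, assuming the changelog docket file and a hypothetical uid:

prefix = b"00changelog.n"[:-2]  # strip the trailing b".n" -> b"00changelog"
rawdata_file = b"%s-%s.nd" % (prefix, b"a1b2c3d4e5f60718")
# -> b"00changelog-a1b2c3d4e5f60718.nd"; any other 00changelog-*.nd file left
# behind by an older transaction matches the cleanup pattern above.
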
199 ### Nodemap Trie
206 ### Nodemap Trie
200 #
207 #
201 # This is a simple reference implementation to compute and persist a nodemap
208 # This is a simple reference implementation to compute and persist a nodemap
202 # trie. This reference implementation is write-only. The python version of this
209 # trie. This reference implementation is write-only. The python version of this
203 # is not expected to be actually used, since it won't provide any performance
210 # is not expected to be actually used, since it won't provide any performance
204 # improvement over the existing non-persistent C implementation.
211 # improvement over the existing non-persistent C implementation.
205 #
212 #
206 # The nodemap is persisted as a trie using 4-bit-address/16-entry blocks. Each
213 # The nodemap is persisted as a trie using 4-bit-address/16-entry blocks. Each
207 # revision can be addressed using its node's shortest unique prefix.
214 # revision can be addressed using its node's shortest unique prefix.
208 #
215 #
209 # The trie is stored as a sequence of blocks. Each block contains 16 entries
216 # The trie is stored as a sequence of blocks. Each block contains 16 entries
210 # (signed 64-bit integers, big endian). Each entry can be one of the following:
217 # (signed 64-bit integers, big endian). Each entry can be one of the following:
211 #
218 #
212 # * value >= 0 -> index of sub-block
219 # * value >= 0 -> index of sub-block
213 # * value == -1 -> no value
220 # * value == -1 -> no value
214 # * value < -1 -> a revision value: rev = -(value+2)
221 # * value < -1 -> a revision value: rev = -(value+2)
215 #
222 #
216 # The implementation focuses on simplicity, not on performance. A Rust
223 # The implementation focuses on simplicity, not on performance. A Rust
217 # implementation should provide an efficient version of the same binary
224 # implementation should provide an efficient version of the same binary
218 # persistence. This reference python implementation is never meant to be
225 # persistence. This reference python implementation is never meant to be
219 # extensively used in production.
226 # extensively used in production.
220
227
221
228
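
A small sketch of how a single 64-bit entry decodes under this encoding; _decode_entry is a hypothetical helper, not part of the module, and the two constants mirror NO_ENTRY and REV_OFFSET defined just below:

NO_ENTRY = -1   # mirrors the module constant below
REV_OFFSET = 2  # mirrors the module constant below

def _decode_entry(value):
    """hypothetical helper: classify one block entry"""
    if value == NO_ENTRY:
        return None                            # empty slot
    elif value >= 0:
        return ('block', value)                # index of a sub-block
    else:
        return ('rev', -(value + REV_OFFSET))  # a revision number

assert _decode_entry(-2) == ('rev', 0)  # revision 0 is stored as -2
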
222 def persistent_data(index):
229 def persistent_data(index):
223 """return the persistent binary form for a nodemap for a given index
230 """return the persistent binary form for a nodemap for a given index
224 """
231 """
225 trie = _build_trie(index)
232 trie = _build_trie(index)
226 return _persist_trie(trie)
233 return _persist_trie(trie)
227
234
228
235
229 def update_persistent_data(index, root, max_idx, last_rev):
236 def update_persistent_data(index, root, max_idx, last_rev):
230 """return the incremental update for persistent nodemap from a given index
237 """return the incremental update for persistent nodemap from a given index
231 """
238 """
232 trie = _update_trie(index, root, last_rev)
239 trie = _update_trie(index, root, last_rev)
233 return _persist_trie(trie, existing_idx=max_idx)
240 return _persist_trie(trie, existing_idx=max_idx)
234
241
235
242
236 S_BLOCK = struct.Struct(">" + ("l" * 16))
243 S_BLOCK = struct.Struct(">" + ("l" * 16))
237
244
238 NO_ENTRY = -1
245 NO_ENTRY = -1
239 # rev 0 needs to be -2 because 0 is used by blocks and -1 is a special value.
246 # rev 0 needs to be -2 because 0 is used by blocks and -1 is a special value.
240 REV_OFFSET = 2
247 REV_OFFSET = 2
241
248
242
249
243 def _transform_rev(rev):
250 def _transform_rev(rev):
244 """Return the number used to represent the rev in the tree.
251 """Return the number used to represent the rev in the tree.
245
252
246 (or retrieve a rev number from such representation)
253 (or retrieve a rev number from such representation)
247
254
248 Note that this is an involution, a function equal to its inverse (i.e.
255 Note that this is an involution, a function equal to its inverse (i.e.
249 which gives the identity when applied to itself).
256 which gives the identity when applied to itself).
250 """
257 """
251 return -(rev + REV_OFFSET)
258 return -(rev + REV_OFFSET)
252
259
253
260
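
A quick check of the involution property described above:

assert _transform_rev(0) == -2      # rev 0 maps to -2
assert _transform_rev(-2) == 0      # and back again
assert _transform_rev(_transform_rev(12345)) == 12345
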
254 def _to_int(hex_digit):
261 def _to_int(hex_digit):
255 """turn an hexadecimal digit into a proper integer"""
262 """turn an hexadecimal digit into a proper integer"""
256 return int(hex_digit, 16)
263 return int(hex_digit, 16)
257
264
258
265
259 class Block(dict):
266 class Block(dict):
260 """represent a block of the Trie
267 """represent a block of the Trie
261
268
262 contains up to 16 entries indexed from 0 to 15"""
269 contains up to 16 entries indexed from 0 to 15"""
263
270
264 def __init__(self):
271 def __init__(self):
265 super(Block, self).__init__()
272 super(Block, self).__init__()
266 # If this block exists on disk, here is its ID
273 # If this block exists on disk, here is its ID
267 self.ondisk_id = None
274 self.ondisk_id = None
268
275
269 def __iter__(self):
276 def __iter__(self):
270 return iter(self.get(i) for i in range(16))
277 return iter(self.get(i) for i in range(16))
271
278
272
279
273 def _build_trie(index):
280 def _build_trie(index):
274 """build a nodemap trie
281 """build a nodemap trie
275
282
276 The nodemap stores revision number for each unique prefix.
283 The nodemap stores revision number for each unique prefix.
277
284
278 Each block is a dictionary with keys in `[0, 15]`. Values are either
285 Each block is a dictionary with keys in `[0, 15]`. Values are either
279 another block or a revision number.
286 another block or a revision number.
280 """
287 """
281 root = Block()
288 root = Block()
282 for rev in range(len(index)):
289 for rev in range(len(index)):
283 hex = nodemod.hex(index[rev][7])
290 hex = nodemod.hex(index[rev][7])
284 _insert_into_block(index, 0, root, rev, hex)
291 _insert_into_block(index, 0, root, rev, hex)
285 return root
292 return root
286
293
287
294
288 def _update_trie(index, root, last_rev):
295 def _update_trie(index, root, last_rev):
289 """consume"""
296 """consume"""
290 for rev in range(last_rev + 1, len(index)):
297 for rev in range(last_rev + 1, len(index)):
291 hex = nodemod.hex(index[rev][7])
298 hex = nodemod.hex(index[rev][7])
292 _insert_into_block(index, 0, root, rev, hex)
299 _insert_into_block(index, 0, root, rev, hex)
293 return root
300 return root
294
301
295
302
296 def _insert_into_block(index, level, block, current_rev, current_hex):
303 def _insert_into_block(index, level, block, current_rev, current_hex):
297 """insert a new revision in a block
304 """insert a new revision in a block
298
305
299 index: the index we are adding a revision for
306 index: the index we are adding a revision for
300 level: the depth of the current block in the trie
307 level: the depth of the current block in the trie
301 block: the block currently being considered
308 block: the block currently being considered
302 current_rev: the revision number we are adding
309 current_rev: the revision number we are adding
303 current_hex: the hexadecimal representation of the node for that revision
310 current_hex: the hexadecimal representation of the node for that revision
304 """
311 """
305 if block.ondisk_id is not None:
312 if block.ondisk_id is not None:
306 block.ondisk_id = None
313 block.ondisk_id = None
307 hex_digit = _to_int(current_hex[level : level + 1])
314 hex_digit = _to_int(current_hex[level : level + 1])
308 entry = block.get(hex_digit)
315 entry = block.get(hex_digit)
309 if entry is None:
316 if entry is None:
310 # no entry, simply store the revision number
317 # no entry, simply store the revision number
311 block[hex_digit] = current_rev
318 block[hex_digit] = current_rev
312 elif isinstance(entry, dict):
319 elif isinstance(entry, dict):
313 # need to recurse to an underlying block
320 # need to recurse to an underlying block
314 _insert_into_block(index, level + 1, entry, current_rev, current_hex)
321 _insert_into_block(index, level + 1, entry, current_rev, current_hex)
315 else:
322 else:
316 # collision with a previously unique prefix, inserting new
323 # collision with a previously unique prefix, inserting new
317 # vertices to fit both entries.
324 # vertices to fit both entries.
318 other_hex = nodemod.hex(index[entry][7])
325 other_hex = nodemod.hex(index[entry][7])
319 other_rev = entry
326 other_rev = entry
320 new = Block()
327 new = Block()
321 block[hex_digit] = new
328 block[hex_digit] = new
322 _insert_into_block(index, level + 1, new, other_rev, other_hex)
329 _insert_into_block(index, level + 1, new, other_rev, other_hex)
323 _insert_into_block(index, level + 1, new, current_rev, current_hex)
330 _insert_into_block(index, level + 1, new, current_rev, current_hex)
324
331
325
332
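
A minimal runnable sketch of the collision branch above, using a hypothetical two-entry index whose nodes share the hex prefix "ab" (only tuple slot 7, the binary node, matters here):

fake_index = [
    (None,) * 7 + (nodemod.bin(b"abef" + b"00" * 18),),  # rev 0
    (None,) * 7 + (nodemod.bin(b"abcd" + b"00" * 18),),  # rev 1
]
root = Block()
for rev in (0, 1):
    _insert_into_block(fake_index, 0, root, rev, nodemod.hex(fake_index[rev][7]))
# root[0xa] is now a Block whose [0xb] entry is a nested Block holding
# {0xc: 1, 0xe: 0}: the two revisions split at their third hex digit.
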
326 def _persist_trie(root, existing_idx=None):
333 def _persist_trie(root, existing_idx=None):
327 """turn a nodemap trie into persistent binary data
334 """turn a nodemap trie into persistent binary data
328
335
329 See `_build_trie` for nodemap trie structure"""
336 See `_build_trie` for nodemap trie structure"""
330 block_map = {}
337 block_map = {}
331 if existing_idx is not None:
338 if existing_idx is not None:
332 base_idx = existing_idx + 1
339 base_idx = existing_idx + 1
333 else:
340 else:
334 base_idx = 0
341 base_idx = 0
335 chunks = []
342 chunks = []
336 for tn in _walk_trie(root):
343 for tn in _walk_trie(root):
337 if tn.ondisk_id is not None:
344 if tn.ondisk_id is not None:
338 block_map[id(tn)] = tn.ondisk_id
345 block_map[id(tn)] = tn.ondisk_id
339 else:
346 else:
340 block_map[id(tn)] = len(chunks) + base_idx
347 block_map[id(tn)] = len(chunks) + base_idx
341 chunks.append(_persist_block(tn, block_map))
348 chunks.append(_persist_block(tn, block_map))
342 return b''.join(chunks)
349 return b''.join(chunks)
343
350
344
351
345 def _walk_trie(block):
352 def _walk_trie(block):
346 """yield all the block in a trie
353 """yield all the block in a trie
347
354
348 Children blocks are always yield before their parent block.
355 Children blocks are always yield before their parent block.
349 """
356 """
350 for (_, item) in sorted(block.items()):
357 for (_, item) in sorted(block.items()):
351 if isinstance(item, dict):
358 if isinstance(item, dict):
352 for sub_block in _walk_trie(item):
359 for sub_block in _walk_trie(item):
353 yield sub_block
360 yield sub_block
354 yield block
361 yield block
355
362
356
363
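
The post-order guarantee is what lets _persist_trie above embed a child's on-disk index into its parent: by the time a parent block is serialized, every child already has an entry in block_map. A tiny illustration:

child = Block()
parent = Block()
parent[0] = child
assert list(_walk_trie(parent)) == [child, parent]  # children come first
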
357 def _persist_block(block_node, block_map):
364 def _persist_block(block_node, block_map):
358 """produce persistent binary data for a single block
365 """produce persistent binary data for a single block
359
366
360 Children blocks are assumed to be already persisted and present in
367 Children blocks are assumed to be already persisted and present in
361 block_map.
368 block_map.
362 """
369 """
363 data = tuple(_to_value(v, block_map) for v in block_node)
370 data = tuple(_to_value(v, block_map) for v in block_node)
364 return S_BLOCK.pack(*data)
371 return S_BLOCK.pack(*data)
365
372
366
373
367 def _to_value(item, block_map):
374 def _to_value(item, block_map):
368 """persist any value as an integer"""
375 """persist any value as an integer"""
369 if item is None:
376 if item is None:
370 return NO_ENTRY
377 return NO_ENTRY
371 elif isinstance(item, dict):
378 elif isinstance(item, dict):
372 return block_map[id(item)]
379 return block_map[id(item)]
373 else:
380 else:
374 return _transform_rev(item)
381 return _transform_rev(item)
375
382
376
383
377 def parse_data(data):
384 def parse_data(data):
378 """parse parse nodemap data into a nodemap Trie"""
385 """parse parse nodemap data into a nodemap Trie"""
379 if (len(data) % S_BLOCK.size) != 0:
386 if (len(data) % S_BLOCK.size) != 0:
380 msg = "nodemap data size is not a multiple of block size (%d): %d"
387 msg = "nodemap data size is not a multiple of block size (%d): %d"
381 raise error.Abort(msg % (S_BLOCK.size, len(data)))
388 raise error.Abort(msg % (S_BLOCK.size, len(data)))
382 if not data:
389 if not data:
383 return Block(), None
390 return Block(), None
384 block_map = {}
391 block_map = {}
385 new_blocks = []
392 new_blocks = []
386 for i in range(0, len(data), S_BLOCK.size):
393 for i in range(0, len(data), S_BLOCK.size):
387 block = Block()
394 block = Block()
388 block.ondisk_id = len(block_map)
395 block.ondisk_id = len(block_map)
389 block_map[block.ondisk_id] = block
396 block_map[block.ondisk_id] = block
390 block_data = data[i : i + S_BLOCK.size]
397 block_data = data[i : i + S_BLOCK.size]
391 values = S_BLOCK.unpack(block_data)
398 values = S_BLOCK.unpack(block_data)
392 new_blocks.append((block, values))
399 new_blocks.append((block, values))
393 for b, values in new_blocks:
400 for b, values in new_blocks:
394 for idx, v in enumerate(values):
401 for idx, v in enumerate(values):
395 if v == NO_ENTRY:
402 if v == NO_ENTRY:
396 continue
403 continue
397 elif v >= 0:
404 elif v >= 0:
398 b[idx] = block_map[v]
405 b[idx] = block_map[v]
399 else:
406 else:
400 b[idx] = _transform_rev(v)
407 b[idx] = _transform_rev(v)
401 return block, i // S_BLOCK.size
408 return block, i // S_BLOCK.size
402
409
403
410
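
A round-trip sketch tying persistent_data and parse_data together, assuming `index` is a revlog index: children are persisted before parents, so the root is the last block on disk, and the returned block index covers the whole data:

data = persistent_data(index)     # full binary form of the nodemap
root, max_idx = parse_data(data)  # root block and index of the last block
assert (max_idx + 1) * S_BLOCK.size == len(data)
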
404 # debug utility
411 # debug utility
405
412
406
413
407 def check_data(ui, index, data):
414 def check_data(ui, index, data):
408 """verify that the provided nodemap data are valid for the given idex"""
415 """verify that the provided nodemap data are valid for the given idex"""
409 ret = 0
416 ret = 0
410 ui.status((b"revision in index: %d\n") % len(index))
417 ui.status((b"revision in index: %d\n") % len(index))
411 root, __ = parse_data(data)
418 root, __ = parse_data(data)
412 all_revs = set(_all_revisions(root))
419 all_revs = set(_all_revisions(root))
413 ui.status((b"revision in nodemap: %d\n") % len(all_revs))
420 ui.status((b"revision in nodemap: %d\n") % len(all_revs))
414 for r in range(len(index)):
421 for r in range(len(index)):
415 if r not in all_revs:
422 if r not in all_revs:
416 msg = b" revision missing from nodemap: %d\n" % r
423 msg = b" revision missing from nodemap: %d\n" % r
417 ui.write_err(msg)
424 ui.write_err(msg)
418 ret = 1
425 ret = 1
419 else:
426 else:
420 all_revs.remove(r)
427 all_revs.remove(r)
421 nm_rev = _find_node(root, nodemod.hex(index[r][7]))
428 nm_rev = _find_node(root, nodemod.hex(index[r][7]))
422 if nm_rev is None:
429 if nm_rev is None:
423 msg = b" revision node does not match any entries: %d\n" % r
430 msg = b" revision node does not match any entries: %d\n" % r
424 ui.write_err(msg)
431 ui.write_err(msg)
425 ret = 1
432 ret = 1
426 elif nm_rev != r:
433 elif nm_rev != r:
427 msg = (
434 msg = (
428 b" revision node does not match the expected revision: "
435 b" revision node does not match the expected revision: "
429 b"%d != %d\n" % (r, nm_rev)
436 b"%d != %d\n" % (r, nm_rev)
430 )
437 )
431 ui.write_err(msg)
438 ui.write_err(msg)
432 ret = 1
439 ret = 1
433
440
434 if all_revs:
441 if all_revs:
435 for r in sorted(all_revs):
442 for r in sorted(all_revs):
436 msg = b" extra revision in nodemap: %d\n" % r
443 msg = b" extra revision in nodemap: %d\n" % r
437 ui.write_err(msg)
444 ui.write_err(msg)
438 ret = 1
445 ret = 1
439 return ret
446 return ret
440
447
441
448
442 def _all_revisions(root):
449 def _all_revisions(root):
443 """return all revisions stored in a Trie"""
450 """return all revisions stored in a Trie"""
444 for block in _walk_trie(root):
451 for block in _walk_trie(root):
445 for v in block:
452 for v in block:
446 if v is None or isinstance(v, Block):
453 if v is None or isinstance(v, Block):
447 continue
454 continue
448 yield v
455 yield v
449
456
450
457
451 def _find_node(block, node):
458 def _find_node(block, node):
452 """find the revision associated with a given node"""
459 """find the revision associated with a given node"""
453 entry = block.get(_to_int(node[0:1]))
460 entry = block.get(_to_int(node[0:1]))
454 if isinstance(entry, dict):
461 if isinstance(entry, dict):
455 return _find_node(entry, node[1:])
462 return _find_node(entry, node[1:])
456 return entry
463 return entry
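
A lookup sketch, reusing the hypothetical two-entry trie built in the _insert_into_block example earlier: each hex digit of the node selects one level of blocks, and a unique prefix is enough to resolve a revision:

hex_node = nodemod.hex(fake_index[1][7])  # b'abcd0000...'
assert _find_node(root, hex_node) == 1    # the full node resolves to rev 1
assert _find_node(root, b"abe") == 0      # a unique prefix also resolves
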
@@ -1,71 +1,73 b''
1 ===================================
1 ===================================
2 Test the persistent on-disk nodemap
2 Test the persistent on-disk nodemap
3 ===================================
3 ===================================
4
4
5
5
6 $ hg init test-repo
6 $ hg init test-repo
7 $ cd test-repo
7 $ cd test-repo
8 $ cat << EOF >> .hg/hgrc
8 $ cat << EOF >> .hg/hgrc
9 > [experimental]
9 > [experimental]
10 > exp-persistent-nodemap=yes
10 > exp-persistent-nodemap=yes
11 > [devel]
11 > [devel]
12 > persistent-nodemap=yes
12 > persistent-nodemap=yes
13 > EOF
13 > EOF
14 $ hg debugbuilddag .+5000
14 $ hg debugbuilddag .+5000
15 $ hg debugnodemap --metadata
15 $ hg debugnodemap --metadata
16 uid: ???????????????? (glob)
16 uid: ???????????????? (glob)
17 tip-rev: 5000
17 $ f --size .hg/store/00changelog.n
18 $ f --size .hg/store/00changelog.n
18 .hg/store/00changelog.n: size=18
19 .hg/store/00changelog.n: size=26
19 $ f --sha256 .hg/store/00changelog-*.nd
20 $ f --sha256 .hg/store/00changelog-*.nd
20 .hg/store/00changelog-????????????????.nd: sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7 (glob)
21 .hg/store/00changelog-????????????????.nd: sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7 (glob)
21 $ hg debugnodemap --dump-new | f --sha256 --size
22 $ hg debugnodemap --dump-new | f --sha256 --size
22 size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
23 size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
23 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
24 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
24 size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
25 size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
25 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
26 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
26 0010: ff ff ff ff ff ff ff ff ff ff fa c2 ff ff ff ff |................|
27 0010: ff ff ff ff ff ff ff ff ff ff fa c2 ff ff ff ff |................|
27 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
28 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
28 0030: ff ff ff ff ff ff ed b3 ff ff ff ff ff ff ff ff |................|
29 0030: ff ff ff ff ff ff ed b3 ff ff ff ff ff ff ff ff |................|
29 0040: ff ff ff ff ff ff ee 34 00 00 00 00 ff ff ff ff |.......4........|
30 0040: ff ff ff ff ff ff ee 34 00 00 00 00 ff ff ff ff |.......4........|
30 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
31 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
31 0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
32 0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
32 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
33 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
33 0080: ff ff ff ff ff ff f8 50 ff ff ff ff ff ff ff ff |.......P........|
34 0080: ff ff ff ff ff ff f8 50 ff ff ff ff ff ff ff ff |.......P........|
34 0090: ff ff ff ff ff ff ff ff ff ff ec c7 ff ff ff ff |................|
35 0090: ff ff ff ff ff ff ff ff ff ff ec c7 ff ff ff ff |................|
35 00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
36 00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
36 00b0: ff ff ff ff ff ff fa be ff ff f2 fc ff ff ff ff |................|
37 00b0: ff ff ff ff ff ff fa be ff ff f2 fc ff ff ff ff |................|
37 00c0: ff ff ff ff ff ff ef ea ff ff ff ff ff ff f9 17 |................|
38 00c0: ff ff ff ff ff ff ef ea ff ff ff ff ff ff f9 17 |................|
38 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
39 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
39 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
40 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
40 00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
41 00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
41 $ hg debugnodemap --check
42 $ hg debugnodemap --check
42 revision in index: 5001
43 revision in index: 5001
43 revision in nodemap: 5001
44 revision in nodemap: 5001
44
45
45 add a new commit
46 add a new commit
46
47
47 $ hg up
48 $ hg up
48 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 $ echo foo > foo
50 $ echo foo > foo
50 $ hg add foo
51 $ hg add foo
51 $ hg ci -m 'foo'
52 $ hg ci -m 'foo'
52 $ hg debugnodemap --metadata
53 $ hg debugnodemap --metadata
53 uid: ???????????????? (glob)
54 uid: ???????????????? (glob)
55 tip-rev: 5001
54 $ f --size .hg/store/00changelog.n
56 $ f --size .hg/store/00changelog.n
55 .hg/store/00changelog.n: size=18
57 .hg/store/00changelog.n: size=26
56
58
57 (The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch)
59 (The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch)
58
60
59 #if pure
61 #if pure
60 $ f --sha256 .hg/store/00changelog-*.nd --size
62 $ f --sha256 .hg/store/00changelog-*.nd --size
61 .hg/store/00changelog-????????????????.nd: size=123072, sha256=136472751566c8198ff09e306a7d2f9bd18bd32298d614752b73da4d6df23340 (glob)
63 .hg/store/00changelog-????????????????.nd: size=123072, sha256=136472751566c8198ff09e306a7d2f9bd18bd32298d614752b73da4d6df23340 (glob)
62
64
63 #else
65 #else
64 $ f --sha256 .hg/store/00changelog-*.nd --size
66 $ f --sha256 .hg/store/00changelog-*.nd --size
65 .hg/store/00changelog-????????????????.nd: size=122880, sha256=bfafebd751c4f6d116a76a37a1dee2a251747affe7efbcc4f4842ccc746d4db9 (glob)
67 .hg/store/00changelog-????????????????.nd: size=122880, sha256=bfafebd751c4f6d116a76a37a1dee2a251747affe7efbcc4f4842ccc746d4db9 (glob)
66
68
67 #endif
69 #endif
68
70
69 $ hg debugnodemap --check
71 $ hg debugnodemap --check
70 revision in index: 5002
72 revision in index: 5002
71 revision in nodemap: 5002
73 revision in nodemap: 5002
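
For reference, the size=26 reported above is consistent with the updated docket layout; a sketch using only the struct format from this change:

import struct
assert 1 + struct.calcsize(">BQ") + 16 == 26  # version byte + header + uid
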