dirstate: Change debug_iter() to yield tuples instead of DirstateItem...
Simon Sapin
r48836:cedfe260 default
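
For orientation before the listing: this patch switches `debug_iter()` from yielding `(filename, DirstateItem)` pairs to yielding flat `(filename, state, mode, size, mtime)` tuples, so callers unpack the fields directly instead of going through the `v1_*` accessor methods. A minimal sketch of the difference for a caller, assuming a dirstate map `dmap` such as `repo.dirstate._map` (the variable name is illustrative, not from the patch):

    # Before this change: debug_iter() yielded (filename, DirstateItem)
    # pairs, and callers read fields through the v1_* accessors.
    for filename, item in dmap.debug_iter(all=False):
        print(filename, item.v1_state(), item.v1_mode(),
              item.v1_size(), item.v1_mtime())

    # After this change: debug_iter() yields plain 5-tuples, so callers
    # unpack the fields directly, as debugstate does in the hunk below.
    for filename, state, mode, size, mtime in dmap.debug_iter(all=False):
        print(filename, state, mode, size, mtime)
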
@@ -1,4941 +1,4938 @@
# debugcommands.py - command processing for debug* commands
#
# Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import binascii
import codecs
import collections
import contextlib
import difflib
import errno
import glob
import operator
import os
import platform
import random
import re
import socket
import ssl
import stat
import string
import subprocess
import sys
import time

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    short,
)
from .pycompat import (
    getattr,
    open,
)
from . import (
    bundle2,
    bundlerepo,
    changegroup,
    cmdutil,
    color,
    context,
    copies,
    dagparser,
    encoding,
    error,
    exchange,
    extensions,
    filemerge,
    filesetlang,
    formatter,
    hg,
    httppeer,
    localrepo,
    lock as lockmod,
    logcmdutil,
    mergestate as mergestatemod,
    metadata,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pvec,
    pycompat,
    registrar,
    repair,
    repoview,
    requirements,
    revlog,
    revset,
    revsetlang,
    scmutil,
    setdiscovery,
    simplemerge,
    sshpeer,
    sslutil,
    streamclone,
    strip,
    tags as tagsmod,
    templater,
    treediscovery,
    upgrade,
    url as urlmod,
    util,
    vfs as vfsmod,
    wireprotoframing,
    wireprotoserver,
    wireprotov2peer,
)
from .interfaces import repository
from .utils import (
    cborutil,
    compression,
    dateutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    deltas as deltautil,
    nodemap,
    rewrite,
    sidedata,
)

release = lockmod.release

table = {}
table.update(strip.command._table)
command = registrar.command(table)


@command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        index, rev1, rev2 = args
        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        if not repo:
            raise error.Abort(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_(b'either two or three arguments required'))
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))


@command(b'debugantivirusrunning', [])
def debugantivirusrunning(ui, repo):
    """attempt to trigger an antivirus scanner to see if one is active"""
    with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
        f.write(
            util.b85decode(
                # This is a base85-armored version of the EICAR test file. See
                # https://en.wikipedia.org/wiki/EICAR_test_file for details.
                b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
                b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
            )
        )
    # Give an AV engine time to scan the file.
    time.sleep(2)
    util.unlink(repo.cachevfs.join('eicar-test-file.com'))


@command(b'debugapplystreamclonebundle', [], b'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.apply(repo)


@command(
    b'debugbuilddag',
    [
        (
            b'm',
            b'mergeable-file',
            None,
            _(b'add single file mergeable changes'),
        ),
        (
            b'o',
            b'overwritten-file',
            None,
            _(b'add single file all revs overwrite'),
        ),
        (b'n', b'new-file', None, _(b'add new file at each rev')),
    ],
    _(b'[OPTION]... [TEXT]'),
)
def debugbuilddag(
    ui,
    repo,
    text=None,
    mergeable_file=False,
    overwritten_file=False,
    new_file=False,
):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string-valued elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_(b"reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_(b'repository is not empty'))

    # determine number of revs in DAG
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == b'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [
            b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
        ]
        initialmergedlines.append(b"")

    tags = []
    progress = ui.makeprogress(
        _(b'building'), unit=_(b'revisions'), total=total
    )
    with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
        at = -1
        atbranch = b'default'
        nodeids = []
        id = 0
        progress.update(id)
        for type, data in dagparser.parsedag(text):
            if type == b'n':
                ui.note((b'node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = b"mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [
                            x[fn].data() for x in (pa, p1, p2)
                        ]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append(b"")
                    elif at > 0:
                        ml = p1[fn].data().split(b"\n")
                    else:
                        ml = initialmergedlines
                    ml[id * linesperrev] += b" r%i" % id
                    mergedtext = b"\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = b"of"
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id

                if new_file:
                    fn = b"nf%i" % id
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith(b"nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(
                            repo, cx, path, filecontent[path]
                        )
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(
                    repo,
                    pars,
                    b"r%i" % id,
                    files,
                    fctxfn,
                    date=(id, 0),
                    user=b"debugbuilddag",
                    extra={b'branch': atbranch},
                )
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == b'l':
                id, name = data
                ui.note((b'tag %s\n' % name))
                tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == b'a':
                ui.note((b'branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write(b"localtags", b"".join(tags))


def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    indent_string = b' ' * indent
    if all:
        ui.writenoi18n(
            b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
            % indent_string
        )

        def showchunks(named):
            ui.write(b"\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
                ui.write(
                    b"%s%s %s %s %s %s %d\n"
                    % (
                        indent_string,
                        hex(node),
                        hex(p1),
                        hex(p2),
                        hex(cs),
                        hex(deltabase),
                        len(delta),
                    )
                )

        gen.changelogheader()
        showchunks(b"changelog")
        gen.manifestheader()
        showchunks(b"manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata[b'filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_(b'use debugbundle2 for this file'))
        gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
            ui.write(b"%s%s\n" % (indent_string, hex(node)))


def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = b' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        msg = b"%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = b"%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        fm = ui.formatter(b'debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()


def _debugphaseheads(ui, data, indent=0):
    """display version and markers contained in 'data'"""
    indent_string = b' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write(indent_string)
            ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))


def _quasirepr(thing):
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        return b'{%s}' % (
            b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
        )
    return pycompat.bytestr(repr(thing))


def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_(b'not a bundle2 file'))
    ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
    parttypes = opts.get('part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = b'%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        if part.type == b'changegroup':
            version = part.params.get(b'version', b'01')
            cg = changegroup.getunbundler(version, part, b'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == b'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == b'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)


@command(
    b'debugbundle',
    [
        (b'a', b'all', None, _(b'show all details')),
        (b'', b'part-type', [], _(b'show only the named part type')),
        (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
    ],
    _(b'FILE'),
    norepo=True,
)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            spec = exchange.getbundlespec(ui, f)
            ui.write(b'%s\n' % spec)
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)


@command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    try:
        caps = peer.capabilities()
        ui.writenoi18n(b'Main capabilities:\n')
        for c in sorted(caps):
            ui.write(b'  %s\n' % c)
        b2caps = bundle2.bundle2caps(peer)
        if b2caps:
            ui.writenoi18n(b'Bundle2 capabilities:\n')
            for key, values in sorted(pycompat.iteritems(b2caps)):
                ui.write(b'  %s\n' % key)
                for v in values:
                    ui.write(b'    %s\n' % v)
    finally:
        peer.close()


@command(
    b'debugchangedfiles',
    [
        (
            b'',
            b'compute',
            False,
            b"compute information instead of reading it from storage",
        ),
    ],
    b'REV',
)
def debugchangedfiles(ui, repo, rev, **opts):
    """list the stored files changes for a revision"""
    ctx = scmutil.revsingle(repo, rev, None)
    files = None

    if opts['compute']:
        files = metadata.compute_all_files_changes(ctx)
    else:
        sd = repo.changelog.sidedata(ctx.rev())
        files_block = sd.get(sidedata.SD_FILES)
        if files_block is not None:
            files = metadata.decode_files_sidedata(sd)
    if files is not None:
        for f in sorted(files.touched):
            if f in files.added:
                action = b"added"
            elif f in files.removed:
                action = b"removed"
            elif f in files.merged:
                action = b"merged"
            elif f in files.salvaged:
                action = b"salvaged"
            else:
                action = b"touched"

            copy_parent = b""
            copy_source = b""
            if f in files.copied_from_p1:
                copy_parent = b"p1"
                copy_source = files.copied_from_p1[f]
            elif f in files.copied_from_p2:
                copy_parent = b"p2"
                copy_source = files.copied_from_p2[f]

            data = (action, copy_parent, f, copy_source)
            template = b"%-8s %2s: %s, %s;\n"
            ui.write(template % data)


@command(b'debugcheckstate', [], b'')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in b"nr" and f not in m1:
            ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in b"a" and f in m1:
            ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in b"m" and f not in m1 and f not in m2:
            ui.warn(
                _(b"%s in state %s, but not in either manifest\n") % (f, state)
            )
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in b"nrm":
            ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)


@command(
    b'debugcolor',
    [(b'', b'style', None, _(b'show all configured styles'))],
    b'hg debugcolor',
)
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
    if opts.get('style'):
        return _debugdisplaystyle(ui)
    else:
        return _debugdisplaycolor(ui)


def _debugdisplaycolor(ui):
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for k, v in ui.configitems(b'color'):
            if k.startswith(b'color.'):
                ui._styles[k] = k[6:]
            elif k.startswith(b'terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_(b'available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(b'%s\n' % colorname, label=label)


def _debugdisplaystyle(ui):
    ui.write(_(b'available style:\n'))
    if not ui._styles:
        return
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write(b'%s' % label, label=label)
        if effects:
            # 50
            ui.write(b': ')
            ui.write(b' ' * (max(0, width - len(label))))
            ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
        ui.write(b'\n')


@command(b'debugcreatestreamclonebundle', [], b'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(
            _(
                b'(warning: stream clone bundle will contain secret '
                b'revisions)\n'
            )
        )

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))


@command(
    b'debugdag',
    [
        (b't', b'tags', None, _(b'use tags as labels')),
        (b'b', b'branches', None, _(b'annotate with branch names')),
        (b'', b'dots', None, _(b'use dots for runs')),
        (b's', b'spaces', None, _(b'separate elements by spaces')),
    ],
    _(b'[OPTION]... [FILE [REV]...]'),
    optionalrepo=True,
)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
        revs = {int(r) for r in revs}

        def events():
            for r in rlog:
                yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
                if r in revs:
                    yield b'l', (r, b"r%i" % r)

    elif repo:
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)

        def events():
            b = b"default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5][b'branch']
                    if newb != b:
                        yield b'a', newb
                        b = newb
                yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield b'l', (r, l)

    else:
        raise error.Abort(_(b'need repo for changelog dag'))

    for line in dagparser.dagtextlines(
        events(),
        addspaces=spaces,
        wraplabels=True,
        wrapannotations=True,
        wrapnonlinear=dots,
        usedots=dots,
        maxlinewidth=70,
    ):
        ui.write(line)
        ui.write(b"\n")


@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    try:
        ui.write(r.rawdata(r.lookup(rev)))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)


@command(
    b'debugdate',
    [(b'e', b'extended', None, _(b'try extended date formats'))],
    _(b'[-e] DATE [RANGE]'),
    norepo=True,
    optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        d = dateutil.parsedate(date, dateutil.extendeddateformats)
    else:
        d = dateutil.parsedate(date)
    ui.writenoi18n(b"internal: %d %d\n" % d)
    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
    if range:
        m = dateutil.matchdate(range)
        ui.writenoi18n(b"match: %s\n" % m(d[0]))


@command(
    b'debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``:       revision number
    :``chainid``:   delta chain identifier (numbered by unique base)
    :``chainlen``:  delta chain length to this revision
    :``prevrev``:   previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``:  compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                    (new delta chains typically start at ratio 2.00)
    :``lindist``:   linear distance from base revision in delta chain to end
                    of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                    how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``:     total size of data read from the disk for a revision
                       (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``:  density of useful bytes in the data read from the disk
    :``srchunks``:  in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r._generaldelta
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            if e[3] == e[5]:
                deltatype = b'p1'
            elif e[3] == e[6]:
                deltatype = b'p2'
            elif e[3] == rev - 1:
                deltatype = b'prev'
            elif e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'other'
        else:
            if e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter(b'debugdeltachain', opts)

    fm.plain(
        b'    rev  chain# chainlen     prev   delta       '
        b'size    rawsize  chainsize     ratio   lindist extradist '
        b'extraratio'
    )
    if withsparseread:
        fm.plain(b'   readsize largestblk rddensity srchunks')
    fm.plain(b'\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write(
            b'rev chainid chainlen prevrev deltatype compsize '
            b'uncompsize chainsize chainratio lindist extradist '
            b'extraratio',
            b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
            rev,
            chainid,
            len(chain),
            prevrev,
            deltatype,
            comp,
            uncomp,
            chainsize,
            chainratio,
            lineardist,
            extradist,
            extraratio,
            rev=rev,
            chainid=chainid,
            chainlen=len(chain),
            prevrev=prevrev,
            deltatype=deltatype,
            compsize=comp,
            uncompsize=uncomp,
            chainsize=chainsize,
            chainratio=chainratio,
            lindist=lineardist,
            extradist=extradist,
            extraratio=extraratio,
        )
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write(
                b'readsize largestblock readdensity srchunks',
                b' %10d %10d %9.5f %8d',
                readsize,
                largestblock,
                readdensity,
                srchunks,
                readsize=readsize,
                largestblock=largestblock,
                readdensity=readdensity,
                srchunks=srchunks,
            )

        fm.plain(b'\n')

    fm.end()


@command(
    b'debugdirstate|debugstate',
    [
        (
            b'',
            b'nodates',
            None,
            _(b'do not display the saved mtime (DEPRECATED)'),
        ),
        (b'', b'dates', True, _(b'display the saved mtime')),
        (b'', b'datesort', None, _(b'sort by saved mtime')),
        (
            b'',
            b'all',
            False,
            _(b'display dirstate-v2 tree nodes that would not exist in v1'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = not opts['dates']
    if opts.get('nodates') is not None:
        nodates = True
    datesort = opts.get('datesort')

    if datesort:
-        keyfunc = lambda x: (
-            x[1].v1_mtime(),
-            x[0],
-        )  # sort by mtime, then by filename
+
+        def keyfunc(entry):
+            filename, _state, _mode, _size, mtime = entry
+            return (mtime, filename)
+
    else:
        keyfunc = None  # sort by filename
    entries = list(repo.dirstate._map.debug_iter(all=opts['all']))
    entries.sort(key=keyfunc)
-    for file_, ent in entries:
-        if ent.v1_mtime() == -1:
+    for entry in entries:
+        filename, state, mode, size, mtime = entry
+        if mtime == -1:
            timestr = b'unset               '
        elif nodates:
            timestr = b'set                 '
        else:
-            timestr = time.strftime(
-                "%Y-%m-%d %H:%M:%S ", time.localtime(ent.v1_mtime())
-            )
+            timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime))
        timestr = encoding.strtolocal(timestr)
-        if ent.mode & 0o20000:
+        if mode & 0o20000:
            mode = b'lnk'
        else:
-            mode = b'%3o' % (ent.v1_mode() & 0o777 & ~util.umask)
+            mode = b'%3o' % (mode & 0o777 & ~util.umask)
-        ui.write(
-            b"%c %s %10d %s%s\n"
-            % (ent.v1_state(), mode, ent.v1_size(), timestr, file_)
-        )
+        ui.write(b"%c %s %10d %s%s\n" % (state, mode, size, timestr, filename))
    for f in repo.dirstate.copies():
        ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))


@command(
    b'debugdirstateignorepatternshash',
    [],
    _(b''),
)
def debugdirstateignorepatternshash(ui, repo, **opts):
    """show the hash of ignore patterns stored in dirstate if v2,
    or nothing for dirstate-v1
    """
    if repo.dirstate._use_dirstate_v2:
        docket = repo.dirstate._map.docket
        hash_len = 20  # 160 bits for SHA-1
        hash_bytes = docket.tree_metadata[-hash_len:]
        ui.write(binascii.hexlify(hash_bytes) + b'\n')


1011 @command(
1008 @command(
1012 b'debugdiscovery',
1009 b'debugdiscovery',
1013 [
1010 [
1014 (b'', b'old', None, _(b'use old-style discovery')),
1011 (b'', b'old', None, _(b'use old-style discovery')),
1015 (
1012 (
1016 b'',
1013 b'',
1017 b'nonheads',
1014 b'nonheads',
1018 None,
1015 None,
1019 _(b'use old-style discovery with non-heads included'),
1016 _(b'use old-style discovery with non-heads included'),
1020 ),
1017 ),
1021 (b'', b'rev', [], b'restrict discovery to this set of revs'),
1018 (b'', b'rev', [], b'restrict discovery to this set of revs'),
1022 (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
1019 (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
1023 (
1020 (
1024 b'',
1021 b'',
1025 b'local-as-revs',
1022 b'local-as-revs',
1026 b"",
1023 b"",
1027 b'treat local has having these revisions only',
1024 b'treat local has having these revisions only',
1028 ),
1025 ),
1029 (
1026 (
1030 b'',
1027 b'',
1031 b'remote-as-revs',
1028 b'remote-as-revs',
1032 b"",
1029 b"",
1033 b'use local as remote, with only these these revisions',
1030 b'use local as remote, with only these these revisions',
1034 ),
1031 ),
1035 ]
1032 ]
1036 + cmdutil.remoteopts
1033 + cmdutil.remoteopts
1037 + cmdutil.formatteropts,
1034 + cmdutil.formatteropts,
1038 _(b'[--rev REV] [OTHER]'),
1035 _(b'[--rev REV] [OTHER]'),
1039 )
1036 )
def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
    """runs the changeset discovery protocol in isolation

    The local peer can be "replaced" by a subset of the local repository by
    using the `--local-as-revs` flag. In the same way, the usual `remote`
    peer can be "replaced" by a subset of the local repository using the
    `--remote-as-revs` flag. This is useful to efficiently debug pathological
    discovery situations.

    The following developer-oriented config options are relevant for people
    playing with this command:

    * devel.discovery.exchange-heads=True

      If False, the discovery will not start with
      remote head fetching and local head querying.

    * devel.discovery.grow-sample=True

      If False, the sample size used in set discovery will not be increased
      through the process.

    * devel.discovery.grow-sample.dynamic=True

      When discovery.grow-sample.dynamic is True, the default, the sample
      size is adapted to the shape of the undecided set (it is set to the max
      of: <target-size>, len(roots(undecided)), len(heads(undecided))).

    * devel.discovery.grow-sample.rate=1.05

      the rate at which the sample grows

    * devel.discovery.randomize=True

      If False, random sampling during discovery is deterministic. It is
      meant for integration tests.

    * devel.discovery.sample-size=200

      Control the size of the discovery sample

    * devel.discovery.sample-size.initial=100

      Control the size of the sample used for the initial discovery round
    """
    opts = pycompat.byteskwargs(opts)
    unfi = repo.unfiltered()

    # setup potential extra filtering
    local_revs = opts[b"local_as_revs"]
    remote_revs = opts[b"remote_as_revs"]

    # make sure tests are repeatable
    random.seed(int(opts[b'seed']))

    if not remote_revs:

        remoteurl, branches = urlutil.get_unique_pull_path(
            b'debugdiscovery', repo, ui, remoteurl
        )
        remote = hg.peer(repo, opts, remoteurl)
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
    else:
        branches = (None, [])
        remote_filtered_revs = scmutil.revrange(
            unfi, [b"not (::(%s))" % remote_revs]
        )
        remote_filtered_revs = frozenset(remote_filtered_revs)

        def remote_func(x):
            return remote_filtered_revs

        repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func

        remote = repo.peer()
        remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')

    if local_revs:
        local_filtered_revs = scmutil.revrange(
            unfi, [b"not (::(%s))" % local_revs]
        )
        local_filtered_revs = frozenset(local_filtered_revs)

        def local_func(x):
            return local_filtered_revs

        repoview.filtertable[b'debug-discovery-local-filter'] = local_func
        repo = repo.filtered(b'debug-discovery-local-filter')

    data = {}
    if opts.get(b'old'):

        def doit(pushedrevs, remoteheads, remote=remote):
            if not util.safehasattr(remote, b'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(
                repo, remote, force=True, audit=data
            )
            common = set(common)
            if not opts.get(b'nonheads'):
                ui.writenoi18n(
                    b"unpruned common: %s\n"
                    % b" ".join(sorted(short(n) for n in common))
                )

            clnode = repo.changelog.node
            common = repo.revs(b'heads(::%ln)', common)
            common = {clnode(r) for r in common}
            return common, hds

    else:

        def doit(pushedrevs, remoteheads, remote=remote):
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(
                ui, repo, remote, ancestorsof=nodes, audit=data
            )
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts[b'rev']

    fm = ui.formatter(b'debugdiscovery', opts)
    if fm.strict_format:

        @contextlib.contextmanager
        def may_capture_output():
            ui.pushbuffer()
            yield
            data[b'output'] = ui.popbuffer()

    else:
        may_capture_output = util.nullcontextmanager
    with may_capture_output():
        with util.timedcm('debug-discovery') as t:
            common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    heads_common = set(common)
    heads_remote = set(hds)
    heads_local = set(repo.heads())
    # note: they cannot be a local or remote head that is in common and not
    # itself a head of common.
    heads_common_local = heads_common & heads_local
    heads_common_remote = heads_common & heads_remote
    heads_common_both = heads_common & heads_remote & heads_local

    all = repo.revs(b'all()')
    common = repo.revs(b'::%ln', common)
    roots_common = repo.revs(b'roots(::%ld)', common)
    missing = repo.revs(b'not ::%ld', common)
    heads_missing = repo.revs(b'heads(%ld)', missing)
    roots_missing = repo.revs(b'roots(%ld)', missing)
    assert len(common) + len(missing) == len(all)

    initial_undecided = repo.revs(
        b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
    )
    heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
    roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
    common_initial_undecided = initial_undecided & common
    missing_initial_undecided = initial_undecided & missing

    data[b'elapsed'] = t.elapsed
    data[b'nb-common-heads'] = len(heads_common)
    data[b'nb-common-heads-local'] = len(heads_common_local)
    data[b'nb-common-heads-remote'] = len(heads_common_remote)
    data[b'nb-common-heads-both'] = len(heads_common_both)
    data[b'nb-common-roots'] = len(roots_common)
    data[b'nb-head-local'] = len(heads_local)
    data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
    data[b'nb-head-remote'] = len(heads_remote)
    data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
        heads_common_remote
    )
    data[b'nb-revs'] = len(all)
    data[b'nb-revs-common'] = len(common)
    data[b'nb-revs-missing'] = len(missing)
    data[b'nb-missing-heads'] = len(heads_missing)
    data[b'nb-missing-roots'] = len(roots_missing)
    data[b'nb-ini_und'] = len(initial_undecided)
    data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
    data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
    data[b'nb-ini_und-common'] = len(common_initial_undecided)
    data[b'nb-ini_und-missing'] = len(missing_initial_undecided)

    fm.startitem()
    fm.data(**pycompat.strkwargs(data))
    # display discovery summary
    fm.plain(b"elapsed time:  %(elapsed)f seconds\n" % data)
    fm.plain(b"round-trips:   %(total-roundtrips)9d\n" % data)
    fm.plain(b"heads summary:\n")
    fm.plain(b"  total common heads:  %(nb-common-heads)9d\n" % data)
    fm.plain(b"    also local heads:  %(nb-common-heads-local)9d\n" % data)
    fm.plain(b"    also remote heads: %(nb-common-heads-remote)9d\n" % data)
    fm.plain(b"    both:              %(nb-common-heads-both)9d\n" % data)
    fm.plain(b"  local heads:         %(nb-head-local)9d\n" % data)
    fm.plain(b"    common:            %(nb-common-heads-local)9d\n" % data)
    fm.plain(b"    missing:           %(nb-head-local-missing)9d\n" % data)
    fm.plain(b"  remote heads:        %(nb-head-remote)9d\n" % data)
    fm.plain(b"    common:            %(nb-common-heads-remote)9d\n" % data)
    fm.plain(b"    unknown:           %(nb-head-remote-unknown)9d\n" % data)
    fm.plain(b"local changesets:      %(nb-revs)9d\n" % data)
    fm.plain(b"  common:              %(nb-revs-common)9d\n" % data)
    fm.plain(b"    heads:             %(nb-common-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-common-roots)9d\n" % data)
    fm.plain(b"  missing:             %(nb-revs-missing)9d\n" % data)
    fm.plain(b"    heads:             %(nb-missing-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-missing-roots)9d\n" % data)
    fm.plain(b"  first undecided set: %(nb-ini_und)9d\n" % data)
    fm.plain(b"    heads:             %(nb-ini_und-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-ini_und-roots)9d\n" % data)
    fm.plain(b"    common:            %(nb-ini_und-common)9d\n" % data)
    fm.plain(b"    missing:           %(nb-ini_und-missing)9d\n" % data)

    if ui.verbose:
        fm.plain(
            b"common heads: %s\n"
            % b" ".join(sorted(short(n) for n in heads_common))
        )
    fm.end()


_chunksize = 4 << 10


@command(
    b'debugdownload',
    [
        (b'o', b'output', b'', _(b'path')),
    ],
    optionalrepo=True,
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config"""
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        dest = open(output, b"wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        if output:
            dest.close()
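
# Editorial usage sketch (hypothetical URL): fetch a resource through
# Mercurial's URL handling (proxy, auth, and certificate config) instead of
# a bare HTTP client, streaming it to a file in _chunksize pieces:
#
#   hg debugdownload https://example.com/bundle.hg -o /tmp/bundle.hg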


@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = None

        if util.safehasattr(extmod, '__file__'):
            extsource = pycompat.fsencode(extmod.__file__)
        elif getattr(sys, 'oxidized', False):
            extsource = pycompat.sysexecutable
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', b'').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', extname)
        else:
            fm.write(b'name', b'%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain(b'\n')
            elif not exttestedwith:
                fm.plain(_(b' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(b' (%s!)\n' % lasttestedversion)

        fm.condwrite(
            ui.verbose and extsource,
            b'source',
            _(b'  location: %s\n'),
            extsource or b"",
        )

        if ui.verbose:
            fm.plain(_(b'  bundled: %s\n') % [b'no', b'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(
            ui.verbose and exttestedwith,
            b'testedwith',
            _(b'  tested with: %s\n'),
            fm.formatlist(exttestedwith, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and extbuglink,
            b'buglink',
            _(b'  bug reporting: %s\n'),
            extbuglink or b"",
        )

    fm.end()
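
# Editorial usage sketch: because debugextensions inherits formatteropts,
# its output can be made machine-readable, e.g.:
#
#   hg debugextensions -v        # adds location / tested-with / bug link
#   hg debugextensions -Tjson    # structured output via the formatter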


@command(
    b'debugfileset',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'apply the filespec on this revision'),
            _(b'REV'),
        ),
        (
            b'',
            b'all-files',
            False,
            _(b'test files from all revisions and working directory'),
        ),
        (
            b's',
            b'show-matcher',
            None,
            _(b'print internal representation of matcher'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
    ],
    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset

    fileset.symbols  # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)

    stages = [
        (b'parsed', pycompat.identity),
        (b'analyzed', filesetlang.analyze),
        (b'optimized', filesetlang.optimize),
    ]
    stagenames = {n for n, f in stages}

    showalways = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(filesetlang.prettyformat(tree), b"\n")

    files = set()
    if opts[b'all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts[b'all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(
            repo.dirstate.walk(
                scmutil.matchall(repo),
                subrepos=list(wctx.substate),
                unknown=True,
                ignored=True,
            )
        )
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(repo.getcwd(), expr)
    if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write(b"%s\n" % f)
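
# Editorial usage sketch: show every parse stage of a fileset before listing
# the files it matches, e.g.:
#
#   hg debugfileset --show-stage all 'added()'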


@command(
    b"debug-repair-issue6528",
    [
        (
            b'',
            b'to-report',
            b'',
            _(b'build a report of affected revisions to this file'),
            _(b'FILE'),
        ),
        (
            b'',
            b'from-report',
            b'',
            _(b'repair revisions listed in this report file'),
            _(b'FILE'),
        ),
        (
            b'',
            b'paranoid',
            False,
            _(b'check that both detection methods do the same thing'),
        ),
    ]
    + cmdutil.dryrunopts,
)
def debug_repair_issue6528(ui, repo, **opts):
    """find affected revisions and repair them. See issue6528 for more details.

    The `--to-report` and `--from-report` flags allow you to cache and reuse
    the computation of affected revisions for a given repository across
    clones. The report format is line-based (with empty lines ignored):

    ```
    <ascii-hex of the affected revision>,... <unencoded filelog index filename>
    ```

    There can be multiple broken revisions per filelog; they are separated by
    a comma with no spaces. The only space is between the revision(s) and the
    filename.

    Note that this does *not* mean that this repairs future affected
    revisions; that needs a separate fix at the exchange level that hasn't
    been written yet (as of 5.9rc0).

    There is a `--paranoid` flag to test that the fast implementation is
    correct by checking it against the slow implementation. Since this matter
    is quite urgent and testing every edge-case is probably quite costly, we
    use this method to test on large repositories as a fuzzing method of
    sorts.
    """
    cmdutil.check_incompatible_arguments(
        opts, 'to_report', ['from_report', 'dry_run']
    )
    dry_run = opts.get('dry_run')
    to_report = opts.get('to_report')
    from_report = opts.get('from_report')
    paranoid = opts.get('paranoid')
    # TODO maybe add filelog pattern and revision pattern parameters to help
    # narrow down the search for users that know what they're looking for?

    if requirements.REVLOGV1_REQUIREMENT not in repo.requirements:
        msg = b"can only repair revlogv1 repositories, v2 is not affected"
        raise error.Abort(_(msg))

    rewrite.repair_issue6528(
        ui,
        repo,
        dry_run=dry_run,
        to_report=to_report,
        from_report=from_report,
        paranoid=paranoid,
    )
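
# Editorial sketch (made-up values): a report line pairs comma-separated
# affected revision hashes with the filelog index they live in, following
# the format documented in the docstring above, e.g.:
#
#   <40-hex-node>,<40-hex-node> data/src/module.py.i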


@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about the current config value and
    the Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len(b'format-variant'), maxvariantlength)

    def makeformatname(name):
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))

    fm = ui.formatter(b'debugformat', opts)
    if fm.isplain():

        def formatvalue(value):
            if util.safehasattr(value, b'startswith'):
                return value
            if value:
                return b'yes'
            else:
                return b'no'

    else:
        formatvalue = pycompat.identity

    fm.plain(b'format-variant')
    fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
    fm.plain(b' repo')
    if ui.verbose:
        fm.plain(b' config default')
    fm.plain(b'\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        if repovalue != configvalue:
            namelabel = b'formatvariant.name.mismatchconfig'
            repolabel = b'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = b'formatvariant.name.mismatchdefault'
            repolabel = b'formatvariant.repo.mismatchdefault'
        else:
            namelabel = b'formatvariant.name.uptodate'
            repolabel = b'formatvariant.repo.uptodate'

        fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
        fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
        if fv.default != configvalue:
            configlabel = b'formatvariant.config.special'
        else:
            configlabel = b'formatvariant.config.default'
        fm.condwrite(
            ui.verbose,
            b'config',
            b' %6s',
            formatvalue(configvalue),
            label=configlabel,
        )
        fm.condwrite(
            ui.verbose,
            b'default',
            b' %7s',
            formatvalue(fv.default),
            label=b'formatvariant.default',
        )
        fm.plain(b'\n')
    fm.end()
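
# Editorial usage sketch: compare the repository's on-disk format variants
# against the current config and Mercurial's defaults:
#
#   hg debugformat -v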


@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""
    ui.writenoi18n(b'path: %s\n' % path)
    ui.writenoi18n(
        b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
    )
    ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
    ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
    ui.writenoi18n(
        b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
    )
    ui.writenoi18n(
        b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
    )
    casesensitive = b'(unknown)'
    try:
        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
    except OSError:
        pass
    ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)


@command(
    b'debuggetbundle',
    [
        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
        (
            b't',
            b'type',
            b'bzip2',
            _(b'bundle compression type to use'),
            _(b'TYPE'),
        ),
    ],
    _(b'REPO FILE [-H|-C ID]...'),
    norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'getbundle'):
        raise error.Abort(b"getbundle() not supported by target repository")
    args = {}
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args['bundlecaps'] = None
    bundle = repo.getbundle(b'debug', **args)

    bundletype = opts.get(b'type', b'bzip2').lower()
    btypes = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_(b'unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
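
# Editorial usage sketch (placeholder ID): save everything reachable from a
# given head as a bundle2 file:
#
#   hg debuggetbundle http://example.com/repo out.hg -H <full-hex-id> -t bundle2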


@command(b'debugignore', [], b'[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space-separated file names, shows whether each file is ignored and,
    if so, the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write(b"%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != b'.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    for p in pathutil.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_(b"%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(
                        _(
                            b"%s is ignored because of "
                            b"containing directory %s\n"
                        )
                        % (uipathfn(f), ignored)
                    )
                ignorefile, lineno, line = ignoredata
                ui.write(
                    _(b"(ignore rule in %s, line %d: '%s')\n")
                    % (ignorefile, lineno, line)
                )
            else:
                ui.write(_(b"%s is not ignored\n") % uipathfn(f))
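
# Editorial usage sketch (hypothetical path): ask why a file is ignored; the
# matching rule's source file and line number are printed when one applies:
#
#   hg debugignore build/output.o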


@command(
    b'debugindex',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindex', file_, opts)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break

    fm = ui.formatter(b'debugindex', opts)
    fm.plain(
        b'   rev linkrev %s %s p2\n'
        % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
    )

    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
        fm.write(b'node', b'%s ', shortfn(node))
        fm.write(b'p1', b'%s ', shortfn(parents[0]))
        fm.write(b'p2', b'%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()
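
# Editorial usage sketch: dump the changelog index (-c), the manifest (-m),
# or a specific file's revlog:
#
#   hg debugindex -c
#   hg debugindex path/to/file.txt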


@command(
    b'debugindexdot',
    cmdutil.debugrevlogopts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
    ui.writenoi18n(b"digraph G {\n")
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != repo.nullid:
            ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write(b"}\n")
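
# Editorial usage sketch: the emitted dot graph can be rendered directly
# with Graphviz:
#
#   hg debugindexdot -c | dot -Tpng -o dag.png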


@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    repo.changelog.shortest(repo.nullid, 1)
    index = repo.changelog.index
    if not util.safehasattr(index, b'stats'):
        raise error.Abort(_(b'debugindexstats only works with native code'))
    for k, v in sorted(index.stats().items()):
        ui.write(b'%s: %d\n' % (k, v))


@command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
def debuginstall(ui, **opts):
    """test Mercurial installation

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)

    problems = 0

    fm = ui.formatter(b'debuginstall', opts)
    fm.startitem()

    # encoding might be unknown or wrong. don't translate these messages.
    fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(
        err,
        b'encodingerror',
        b" %s\n (check that your locale is properly set)\n",
        err,
    )

    # Python
    pythonlib = None
    if util.safehasattr(os, '__file__'):
        pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
    elif getattr(sys, 'oxidized', False):
        pythonlib = pycompat.sysexecutable

    fm.write(
        b'pythonexe',
        _(b"checking Python executable (%s)\n"),
        pycompat.sysexecutable or _(b"unknown"),
    )
    fm.write(
        b'pythonimplementation',
        _(b"checking Python implementation (%s)\n"),
        pycompat.sysbytes(platform.python_implementation()),
    )
    fm.write(
        b'pythonver',
        _(b"checking Python version (%s)\n"),
        (b"%d.%d.%d" % sys.version_info[:3]),
    )
    fm.write(
        b'pythonlib',
        _(b"checking Python lib (%s)...\n"),
        pythonlib or _(b"unknown"),
    )

    try:
        from . import rustext  # pytype: disable=import-error

        rustext.__doc__  # trigger lazy import
    except ImportError:
        rustext = None

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add(b'sni')

    fm.write(
        b'pythonsecurity',
        _(b"checking Python security support (%s)\n"),
        fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
    )

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if b'tls1.2' not in security:
        fm.plain(
            _(
                b'  TLS 1.2 not supported by Python install; '
                b'network connections lack modern security\n'
            )
        )
    if b'sni' not in security:
        fm.plain(
            _(
                b'  SNI not supported by Python install; may have '
                b'connectivity issues with some servers\n'
            )
        )

    fm.plain(
        _(
            b"checking Rust extensions (%s)\n"
            % (b'missing' if rustext is None else b'installed')
        ),
    )

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write(
        b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
    )
    fm.write(
        b'hgverextra',
        _(b"checking Mercurial custom build (%s)\n"),
        b'+'.join(hgver.split(b'+')[1:]),
    )

    # compiled modules
    hgmodules = None
    if util.safehasattr(sys.modules[__name__], '__file__'):
        hgmodules = os.path.dirname(pycompat.fsencode(__file__))
    elif getattr(sys, 'oxidized', False):
        hgmodules = pycompat.sysexecutable

    fm.write(
        b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
    )
    fm.write(
        b'hgmodules',
        _(b"checking installed modules (%s)...\n"),
        hgmodules or _(b"unknown"),
    )

    rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
    rustext = rustandc  # for now, that's the only case
    cext = policy.policy in (b'c', b'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        err = None
        try:
            if cext:
                from .cext import (  # pytype: disable=import-error
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )

                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (  # pytype: disable=import-error
                    ancestor,
                    dirstate,
                )

                dir(ancestor), dir(dirstate)  # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, b'extensionserror', b" %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write(
        b'compengines',
        _(b'checking registered compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    fm.write(
        b'compenginesavail',
        _(b'checking available compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines if e.available()),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE
    )
    fm.write(
        b'compenginesserver',
        _(
            b'checking available compression engines '
            b'for wire protocol (%s)\n'
        ),
        fm.formatlist(
            [e.name() for e in wirecompengines if e.wireprotosupport()],
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    re2 = b'missing'
    if util._re2:
        re2 = b'available'
    fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatedir()
    fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
    fm.condwrite(not p, b'', _(b" no template directories found\n"))
    if p:
        (m, fp) = templater.try_open_template(b"map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
        else:
            p = None
        fm.condwrite(
            p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
        )
        fm.condwrite(
            not m,
            b'defaulttemplatenotfound',
            _(b" template '%s' not found\n"),
            b"default",
        )
2017 if not p:
2014 if not p:
2018 problems += 1
2015 problems += 1
2019 fm.condwrite(
2016 fm.condwrite(
2020 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
2017 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
2021 )
2018 )
2022
2019
2023 # editor
2020 # editor
2024 editor = ui.geteditor()
2021 editor = ui.geteditor()
2025 editor = util.expandpath(editor)
2022 editor = util.expandpath(editor)
2026 editorbin = procutil.shellsplit(editor)[0]
2023 editorbin = procutil.shellsplit(editor)[0]
2027 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
2024 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
2028 cmdpath = procutil.findexe(editorbin)
2025 cmdpath = procutil.findexe(editorbin)
2029 fm.condwrite(
2026 fm.condwrite(
2030 not cmdpath and editor == b'vi',
2027 not cmdpath and editor == b'vi',
2031 b'vinotfound',
2028 b'vinotfound',
2032 _(
2029 _(
2033 b" No commit editor set and can't find %s in PATH\n"
2030 b" No commit editor set and can't find %s in PATH\n"
2034 b" (specify a commit editor in your configuration"
2031 b" (specify a commit editor in your configuration"
2035 b" file)\n"
2032 b" file)\n"
2036 ),
2033 ),
2037 not cmdpath and editor == b'vi' and editorbin,
2034 not cmdpath and editor == b'vi' and editorbin,
2038 )
2035 )
2039 fm.condwrite(
2036 fm.condwrite(
2040 not cmdpath and editor != b'vi',
2037 not cmdpath and editor != b'vi',
2041 b'editornotfound',
2038 b'editornotfound',
2042 _(
2039 _(
2043 b" Can't find editor '%s' in PATH\n"
2040 b" Can't find editor '%s' in PATH\n"
2044 b" (specify a commit editor in your configuration"
2041 b" (specify a commit editor in your configuration"
2045 b" file)\n"
2042 b" file)\n"
2046 ),
2043 ),
2047 not cmdpath and editorbin,
2044 not cmdpath and editorbin,
2048 )
2045 )
2049 if not cmdpath and editor != b'vi':
2046 if not cmdpath and editor != b'vi':
2050 problems += 1
2047 problems += 1
2051
2048
2052 # check username
2049 # check username
2053 username = None
2050 username = None
2054 err = None
2051 err = None
2055 try:
2052 try:
2056 username = ui.username()
2053 username = ui.username()
2057 except error.Abort as e:
2054 except error.Abort as e:
2058 err = e.message
2055 err = e.message
2059 problems += 1
2056 problems += 1
2060
2057
2061 fm.condwrite(
2058 fm.condwrite(
2062 username, b'username', _(b"checking username (%s)\n"), username
2059 username, b'username', _(b"checking username (%s)\n"), username
2063 )
2060 )
2064 fm.condwrite(
2061 fm.condwrite(
2065 err,
2062 err,
2066 b'usernameerror',
2063 b'usernameerror',
2067 _(
2064 _(
2068 b"checking username...\n %s\n"
2065 b"checking username...\n %s\n"
2069 b" (specify a username in your configuration file)\n"
2066 b" (specify a username in your configuration file)\n"
2070 ),
2067 ),
2071 err,
2068 err,
2072 )
2069 )
2073
2070
2074 for name, mod in extensions.extensions():
2071 for name, mod in extensions.extensions():
2075 handler = getattr(mod, 'debuginstall', None)
2072 handler = getattr(mod, 'debuginstall', None)
2076 if handler is not None:
2073 if handler is not None:
2077 problems += handler(ui, fm)
2074 problems += handler(ui, fm)
2078
2075
2079 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
2076 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
2080 if not problems:
2077 if not problems:
2081 fm.data(problems=problems)
2078 fm.data(problems=problems)
2082 fm.condwrite(
2079 fm.condwrite(
2083 problems,
2080 problems,
2084 b'problems',
2081 b'problems',
2085 _(b"%d problems detected, please check your install!\n"),
2082 _(b"%d problems detected, please check your install!\n"),
2086 problems,
2083 problems,
2087 )
2084 )
2088 fm.end()
2085 fm.end()
2089
2086
2090 return problems
2087 return problems
2091
2088
2092
2089
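# Editor's note: a usage sketch, not part of the original module. Because
# debuginstall() writes through a formatter, its fields (hgver, hgmodules,
# problems, ...) can be consumed as structured output:
#
#   $ hg debuginstall -Tjson
#
# A nonzero number of problems is reported both in the output and via the
# command's return value (see `return problems` above).
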
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))

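# Illustrative invocation (a sketch; the ids are placeholders and must be
# full-length hex node ids, per the docstring above):
#
#   $ hg debugknown /path/to/repo <known-node-hex> <unknown-node-hex>
#   10
#
# One '1' (known) or '0' (unknown) is printed per ID, in argument order.
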
@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)


@command(
    b'debuglocks',
    [
        (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-free-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    if opts.get('force_free_lock'):
        repo.svfs.unlink(b'lock')
    if opts.get('force_free_wlock'):
        repo.vfs.unlink(b'wlock')
    if opts.get('force_free_lock') or opts.get('force_free_wlock'):
        return 0

    locks = []
    try:
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if b":" in locker:
                    host, pid = locker.split(b':')
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    return held

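# Sample session (a sketch; the user and pid values are hypothetical),
# matching the `%-6s %s (%ds)` and `%-6s free` formats emitted by report()
# above:
#
#   $ hg debuglocks
#   lock:  free
#   wlock: user alice, process 12345 (3s)
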
@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _(
                b"Current revlog implementation doesn't appear to have a "
                b"manifest fulltext cache\n"
            )
            raise error.Abort(msg)

    if opts.get('clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
            return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(
                        bytes(e), hint=b"Check your manifest node id"
                    )
                manifest.read()  # stores revision in cache too
            return

    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )

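# The three modes wired up above, sketched (the manifest node is a
# placeholder):
#
#   $ hg debugmanifestfulltextcache                       # list entries
#   $ hg debugmanifestfulltextcache --clear               # drop cached data
#   $ hg debugmanifestfulltextcache --add <manifest-node-hex>
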
@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
def debugmergestate(ui, repo, *args, **opts):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    if ui.verbose:
        ms = mergestatemod.mergestate(repo)

        # sort so that reasonable information is on top
        v1records = ms._readrecordsv1()
        v2records = ms._readrecordsv2()

        if not v1records and not v2records:
            pass
        elif not v2records:
            ui.writenoi18n(b'no version 2 merge state\n')
        elif ms._v1v2match(v1records, v2records):
            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
        else:
            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')

    opts = pycompat.byteskwargs(opts)
    if not opts[b'template']:
        opts[b'template'] = (
            b'{if(commits, "", "no merge state found\n")}'
            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
            b'{files % "file: {path} (state \\"{state}\\")\n'
            b'{if(local_path, "'
            b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
            b' ancestor path: {ancestor_path} (node {ancestor_node})\n'
            b' other path: {other_path} (node {other_node})\n'
            b'")}'
            b'{if(rename_side, "'
            b' rename side: {rename_side}\n'
            b' renamed path: {renamed_path}\n'
            b'")}'
            b'{extras % " extra: {key} = {value}\n"}'
            b'"}'
            b'{extras % "extra: {file} ({key} = {value})\n"}'
        )

    ms = mergestatemod.mergestate.read(repo)

    fm = ui.formatter(b'debugmergestate', opts)
    fm.startitem()

    fm_commits = fm.nested(b'commits')
    if ms.active():
        for name, node, label_index in (
            (b'local', ms.local, 0),
            (b'other', ms.other, 1),
        ):
            fm_commits.startitem()
            fm_commits.data(name=name)
            fm_commits.data(node=hex(node))
            if ms._labels and len(ms._labels) > label_index:
                fm_commits.data(label=ms._labels[label_index])
    fm_commits.end()

    fm_files = fm.nested(b'files')
    if ms.active():
        for f in ms:
            fm_files.startitem()
            fm_files.data(path=f)
            state = ms._state[f]
            fm_files.data(state=state[0])
            if state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED,
                mergestatemod.MERGE_RECORD_RESOLVED,
            ):
                fm_files.data(local_key=state[1])
                fm_files.data(local_path=state[2])
                fm_files.data(ancestor_path=state[3])
                fm_files.data(ancestor_node=state[4])
                fm_files.data(other_path=state[5])
                fm_files.data(other_node=state[6])
                fm_files.data(local_flags=state[7])
            elif state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
                mergestatemod.MERGE_RECORD_RESOLVED_PATH,
            ):
                fm_files.data(renamed_path=state[1])
                fm_files.data(rename_side=state[2])
            fm_extras = fm_files.nested(b'extras')
            for k, v in sorted(ms.extras(f).items()):
                fm_extras.startitem()
                fm_extras.data(key=k)
                fm_extras.data(value=v)
            fm_extras.end()

    fm_files.end()

    fm_extras = fm.nested(b'extras')
    for f, d in sorted(pycompat.iteritems(ms.allextras())):
        if f in ms:
            # If file is in mergestate, we have already processed its extras
            continue
        for k, v in pycompat.iteritems(d):
            fm_extras.startitem()
            fm_extras.data(file=f)
            fm_extras.data(key=k)
            fm_extras.data(value=v)
    fm_extras.end()

    fm.end()

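# The keywords populated above (path, state, local_path, extras, ...) are
# also usable with an explicit template, e.g. (a sketch):
#
#   $ hg debugmergestate -T '{files % "{path}: {state}\n"}'
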
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in pycompat.iteritems(repo.names):
        if name != b'branches':
            names.update(ns.listnames(repo))
    names.update(
        tag
        for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
        if not closed
    )
    completions = set()
    if not args:
        args = [b'']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write(b'\n'.join(sorted(completions)))
    ui.write(b'\n')


@command(
    b'debugnodemap',
    [
        (
            b'',
            b'dump-new',
            False,
            _(b'write a (new) persistent binary nodemap on stdout'),
        ),
        (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
        (
            b'',
            b'check',
            False,
            _(b'check that the data on disk are correct.'),
        ),
        (
            b'',
            b'metadata',
            False,
            _(b'display the on-disk metadata for the nodemap'),
        ),
    ],
)
def debugnodemap(ui, repo, **opts):
    """write and inspect on disk nodemap"""
    if opts['dump_new']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        if util.safehasattr(cl.index, "nodemap_data_all"):
            data = cl.index.nodemap_data_all()
        else:
            data = nodemap.persistent_data(cl.index)
        ui.write(data)
    elif opts['dump_disk']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write(data[:])
    elif opts['check']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            return nodemap.check_data(ui, cl.index, data)
    elif opts['metadata']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write((b"uid: %s\n") % docket.uid)
            ui.write((b"tip-rev: %d\n") % docket.tip_rev)
            ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
            ui.write((b"data-length: %d\n") % docket.data_length)
            ui.write((b"data-unused: %d\n") % docket.data_unused)
            unused_perc = docket.data_unused * 100.0 / docket.data_length
            ui.write((b"data-unused: %2.3f%%\n") % unused_perc)

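# Shape of the --metadata output written above (the values shown are
# hypothetical):
#
#   $ hg debugnodemap --metadata
#   uid: 72e5d3a1b2c4d5e6
#   tip-rev: 5004
#   tip-node: <40 hex digits>
#   data-length: 122880
#   data-unused: 256
#   data-unused: 0.208%
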
@command(
    b'debugobsolete',
    [
        (b'', b'flags', 0, _(b'markers flag')),
        (
            b'',
            b'record-parents',
            False,
            _(b'record parent information for the precursor'),
        ),
        (b'r', b'rev', [], _(b'display markers relevant to REV')),
        (
            b'',
            b'exclusive',
            False,
            _(b'restrict display to markers only relevant to REV'),
        ),
        (b'', b'index', False, _(b'display index of the marker')),
        (b'', b'delete', [], _(b'delete markers specified by indices')),
    ]
    + cmdutil.commitopts2
    + cmdutil.formatteropts,
    _(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != repo.nodeconstants.nodelen:
                raise TypeError()
            return n
        except TypeError:
            raise error.InputError(
                b'changeset references must be full hexadecimal '
                b'node identifiers'
            )

    if opts.get(b'delete'):
        indices = []
        for v in opts.get(b'delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.InputError(
                    _(b'invalid index value: %r') % v,
                    hint=_(b'use integers for indices'),
                )

        if repo.currenttransaction():
            raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of transaction.')
            )

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_(b'deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        if opts[b'rev']:
            raise error.InputError(
                b'cannot select revision when creating marker'
            )
        metadata = {}
        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction(b'debugobsolete')
            try:
                date = opts.get(b'date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts[b'record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort(
                            b'cannot use --record-parents on '
                            b'unknown changesets'
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts[b'flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % stringutil.forcebytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        if opts[b'rev']:
            revs = scmutil.revrange(repo, opts[b'rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts[b'exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get(b'rev') and opts.get(b'index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get(b'index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()

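# Sketches of the three ways to drive debugobsolete (node ids are
# placeholders; both must be full 40-hex ids, per parsenodeid() above):
#
#   $ hg debugobsolete                                    # list markers
#   $ hg debugobsolete <old-node-hex> <new-node-hex>      # create a marker
#   $ hg debugobsolete --delete 0                         # delete by index
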
@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p1copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p2copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))

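# Both commands print one `source -> destination` line per recorded copy,
# e.g. (a sketch with hypothetical file names):
#
#   $ hg debugp1copies -r .
#   old-name.txt -> new-name.txt
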
@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    """complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used."""

    def complete(path, acceptable):
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        spec = spec[len(rootdir) :]
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in pycompat.iteritems(dirstate):
            if f.startswith(spec) and st.state in acceptable:
                if fixpaths:
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')

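# Completion sketch (hypothetical paths): without -n/-a/-r the full state
# set 'nmar' is accepted, and completion stops at the next path segment
# unless --full is given:
#
#   $ hg debugpathcomplete doc
#   doc/README
#   doc/api
#   $ hg debugpathcomplete --full doc
#   doc/README
#   doc/api/index.txt
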
@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    ctx1 = scmutil.revsingle(repo, rev1)
    ctx2 = scmutil.revsingle(repo, rev2)
    m = scmutil.match(ctx1, pats, opts)
    for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
        ui.write(b'%s -> %s\n' % (src, dst))


@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging. Requires --debug to display
    # though.
    overrides = {
        (b'devel', b'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        try:
            local = peer.local() is not None
            canpush = peer.canpush()

            ui.write(_(b'url: %s\n') % peer.url())
            ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
            ui.write(
                _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
            )
        finally:
            peer.close()

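# Output sketch for a local target (the exact url form and answers depend
# on the peer):
#
#   $ hg debugpeer /path/to/repo
#   url: file:/path/to/repo
#   local: yes
#   pushable: yes
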
@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for the specified file

    As described in :hg:`help merge-tools`, Mercurial examines the
    configurations below in this order to decide which merge tool is
    chosen for the specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reasons only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out the examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actually updating to it.

    With --debug, this command also shows warning messages while matching
    against ``merge-patterns`` and so on. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases the amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows the configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If a merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such a case, the information above
    is useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts[b'tool']:
        overrides[(b'ui', b'forcemerge')] = opts[b'tool']
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        hgmerge = encoding.environ.get(b"HGMERGE")
        if hgmerge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config(b"ui", b"merge")
        if uimerge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts[b'changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            with ui.silent(
                error=True
            ) if not ui.debugflag else util.nullcontextmanager():
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            ui.write(b'%s = %s\n' % (path, tool))

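# With --tool, _picktool() above short-circuits to the forced tool, so the
# output reduces to (file name hypothetical):
#
#   $ hg debugpickmergetool --tool :merge3 some-file.txt
#   some-file.txt = :merge3
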
@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    """access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    """

    target = hg.peer(ui, {}, repopath)
    try:
        if keyinfo:
            key, old, new = keyinfo
            with target.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': namespace,
                        b'key': key,
                        b'old': old,
                        b'new': new,
                    },
                ).result()

            ui.status(pycompat.bytestr(r) + b'\n')
            return not r
        else:
            for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
                ui.write(
                    b"%s\t%s\n"
                    % (stringutil.escapestr(k), stringutil.escapestr(v))
                )
    finally:
        target.close()

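# Pushkey sketches for the two calling conventions documented above
# (bookmark name and node are placeholders):
#
#   $ hg debugpushkey /path/to/repo bookmarks             # two args: list
#   $ hg debugpushkey /path/to/repo bookmarks mybook '' <node-hex>
#                                                         # five args: set
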
2910 @command(b'debugpvec', [], _(b'A B'))
2907 @command(b'debugpvec', [], _(b'A B'))
2911 def debugpvec(ui, repo, a, b=None):
2908 def debugpvec(ui, repo, a, b=None):
2912 ca = scmutil.revsingle(repo, a)
2909 ca = scmutil.revsingle(repo, a)
2913 cb = scmutil.revsingle(repo, b)
2910 cb = scmutil.revsingle(repo, b)
2914 pa = pvec.ctxpvec(ca)
2911 pa = pvec.ctxpvec(ca)
2915 pb = pvec.ctxpvec(cb)
2912 pb = pvec.ctxpvec(cb)
2916 if pa == pb:
2913 if pa == pb:
2917 rel = b"="
2914 rel = b"="
2918 elif pa > pb:
2915 elif pa > pb:
2919 rel = b">"
2916 rel = b">"
2920 elif pa < pb:
2917 elif pa < pb:
2921 rel = b"<"
2918 rel = b"<"
2922 elif pa | pb:
2919 elif pa | pb:
2923 rel = b"|"
2920 rel = b"|"
2924 ui.write(_(b"a: %s\n") % pa)
2921 ui.write(_(b"a: %s\n") % pa)
2925 ui.write(_(b"b: %s\n") % pb)
2922 ui.write(_(b"b: %s\n") % pb)
2926 ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
2923 ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
2927 ui.write(
2924 ui.write(
2928 _(b"delta: %d hdist: %d distance: %d relation: %s\n")
2925 _(b"delta: %d hdist: %d distance: %d relation: %s\n")
2929 % (
2926 % (
2930 abs(pa._depth - pb._depth),
2927 abs(pa._depth - pb._depth),
2931 pvec._hamming(pa._vec, pb._vec),
2928 pvec._hamming(pa._vec, pb._vec),
2932 pa.distance(pb),
2929 pa.distance(pb),
2933 rel,
2930 rel,
2934 )
2931 )
2935 )
2932 )
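
# How to read the relation column printed above (pvec comparisons are
# probabilistic, so these are likely rather than guaranteed relationships):
#   '='  the two vectors are identical
#   '<'  a is probably an ancestor of b
#   '>'  a is probably a descendant of b
#   '|'  a and b are probably unrelated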


@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look for the given revision

    If no revision is specified, the first parent of the working directory
    is used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to
    be tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
            changedfiles = manifestonly | dsnotadded
        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
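
# Illustrative invocations:
#
#   $ hg debugrebuilddirstate             # rebuild from the working copy parent
#   $ hg debugrebuilddirstate -r tip      # rebuild as of a specific revision
#   $ hg debugrebuilddirstate --minimal   # only fix manifest/dirstate mismatches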


@command(
    b'debugrebuildfncache',
    [
        (
            b'',
            b'only-data',
            False,
            _(b'only look for wrong .d files (much faster)'),
        )
    ],
    b'',
)
def debugrebuildfncache(ui, repo, **opts):
    """rebuild the fncache file"""
    opts = pycompat.byteskwargs(opts)
    repair.rebuildfncache(ui, repo, opts.get(b"only_data"))


@command(
    b'debugrename',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    m = scmutil.match(ctx, pats, opts)
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        o = fctx.filelog().renamed(fctx.filenode())
        rel = repo.pathto(abs)
        if o:
            ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
        else:
            ui.write(_(b"%s not renamed\n") % rel)


@command(b'debugrequires|debugrequirements', [], b'')
def debugrequirements(ui, repo):
    """print the current repo requirements"""
    for r in sorted(repo.requirements):
        ui.write(b"%s\n" % r)


@command(
    b'debugrevlog',
    cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)

    if opts.get(b"dump"):
        numrevs = len(r)
        ui.write(
            (
                b"# rev p1rev p2rev start end deltastart base p1 p2"
                b" rawsize totalsize compression heads chainlen\n"
            )
        )
        ts = 0
        heads = set()

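        # The loop below visits every revision in index order, accumulating
        # the total raw text size in `ts` and maintaining the set of current
        # DAG heads: a revision stays in `heads` until it later appears as a
        # parent of another revision.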
        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write(
                b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                b"%11d %5d %8d\n"
                % (
                    rev,
                    p1,
                    p2,
                    r.start(rev),
                    r.end(rev),
                    r.start(dbase),
                    r.start(cbase),
                    r.start(p1),
                    r.start(p2),
                    rs,
                    ts,
                    compression,
                    len(heads),
                    clen,
                )
            )
        return 0

    format = r._format_version
    v = r._format_flags
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append(b'inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append(b'generaldelta')
    if not flags:
        flags = [b'(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks how the "delta" entries are built
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # deltas against prev where prev is also the first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about the delta chain of each rev
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot size per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

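    # Each size tracker above is a [min, max, total] triple maintained
    # incrementally by addsize() below; e.g. addsize(5, l) followed by
    # addsize(2, l) on a fresh [None, 0, 0] leaves l == [2, 5, 7].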
    def addsize(size, l):
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, b'_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            chunktype = bytes(segment[0:1])
        else:
            chunktype = b'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    if numfull == 0:
        fullsize[2] = 0
    else:
        fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    basedfmtstr = b'%%%dd\n'
    basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        return basedfmtstr % len(str(max))

    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), b' ' * padding)

    def pcfmt(value, total):
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

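    # Worked example for the helpers above: pcfmtstr(1000) builds the format
    # b'%4d (%5.2f%%)\n', so fmt % pcfmt(250, 1000) renders as
    # b' 250 (25.00%)\n'.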
    ui.writenoi18n(b'format : %d\n' % format)
    ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))

    ui.write(b'\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
    ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
    ui.writenoi18n(
        b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
    )
    ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
    ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
    ui.writenoi18n(
        b' text : '
        + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b' delta : '
        + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b' lvl-%-3d : ' % depth)
            + fmt % pcfmt(numsnapdepth[depth], numrevs)
        )
    ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
    ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
    ui.writenoi18n(
        b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b' lvl-%-3d : ' % depth)
            + fmt % pcfmt(snaptotal[depth], totalsize)
        )
    ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        if chunktype == b'empty':
            return b' %s : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return b' 0x%s : ' % hex(chunktype)

    ui.write(b'\n')
    ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write(b'\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
    ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
    ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
    ui.writenoi18n(b'compression ratio : ' + fmt % compratio)

    if format > 0:
        ui.write(b'\n')
        ui.writenoi18n(
            b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
            % tuple(datasize)
        )
        ui.writenoi18n(
            b'full revision size (min/max/avg) : %d / %d / %d\n'
            % tuple(fullsize)
        )
        ui.writenoi18n(
            b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
            % tuple(semisize)
        )
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.writenoi18n(
                b' level-%-3d (min/max/avg) : %d / %d / %d\n'
                % ((depth,) + tuple(snapsizedepth[depth]))
            )
        ui.writenoi18n(
            b'delta size (min/max/avg) : %d / %d / %d\n'
            % tuple(deltasize)
        )

    if numdeltas > 0:
        ui.write(b'\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.writenoi18n(
            b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
        )
        if numprev > 0:
            ui.writenoi18n(
                b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
            )
            ui.writenoi18n(
                b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
            )
            ui.writenoi18n(
                b' other : ' + fmt2 % pcfmt(numoprev, numprev)
            )
        if gdelta:
            ui.writenoi18n(
                b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
            )


@command(
    b'debugrevlogindex',
    cmdutil.debugrevlogopts
    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
    _(b'[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
    format = opts.get(b'format', 0)
    if format not in (0, 1):
        raise error.Abort(_(b"unknown format %d") % format)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        if ui.verbose:
            ui.writenoi18n(
                b" rev offset length linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
        else:
            ui.writenoi18n(
                b" rev linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
    elif format == 1:
        if ui.verbose:
            ui.writenoi18n(
                (
                    b" rev flag offset length size link p1"
                    b" p2 %s\n"
                )
                % b"nodeid".rjust(idlen)
            )
        else:
            ui.writenoi18n(
                b" rev flag size link p1 p2 %s\n"
                % b"nodeid".rjust(idlen)
            )

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [repo.nullid, repo.nullid]
            if ui.verbose:
                ui.write(
                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
                    % (
                        i,
                        r.start(i),
                        r.length(i),
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
            else:
                ui.write(
                    b"% 6d % 7d %s %s %s\n"
                    % (
                        i,
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write(
                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.start(i),
                        r.length(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
            else:
                ui.write(
                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )


@command(
    b'debugrevspec',
    [
        (
            b'',
            b'optimize',
            None,
            _(b'print parsed tree after optimizing (DEPRECATED)'),
        ),
        (
            b'',
            b'show-revs',
            True,
            _(b'print list of result revisions (default)'),
        ),
        (
            b's',
            b'show-set',
            None,
            _(b'print internal representation of result set'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
        (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
        (b'', b'verify-optimized', False, _(b'verify optimized result')),
    ],
    b'REVSPEC',
)
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems(b'revsetalias')
    stages = [
        (b'parsed', lambda tree: tree),
        (
            b'expanded',
            lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
        ),
        (b'concatenated', revsetlang.foldconcat),
        (b'analyzed', revsetlang.analyze),
        (b'optimized', revsetlang.optimize),
    ]
    if opts[b'no_optimized']:
        stages = stages[:-1]
    if opts[b'verify_optimized'] and opts[b'no_optimized']:
        raise error.Abort(
            _(b'cannot use --verify-optimized with --no-optimized')
        )
    stagenames = {n for n, f in stages}

    showalways = set()
    showchanged = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
        showchanged.update([b'expanded', b'concatenated'])
        if opts[b'optimize']:
            showalways.add(b'optimized')
    if opts[b'show_stage'] and opts[b'optimize']:
        raise error.Abort(_(b'cannot use --optimize with --show-stage'))
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(revsetlang.prettyformat(tree), b"\n")
            printedtree = tree

    if opts[b'verify_optimized']:
        arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
        brevs = revset.makematcher(treebystage[b'optimized'])(repo)
        if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
            ui.writenoi18n(
                b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
            )
            ui.writenoi18n(
                b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
            )
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
        ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write(b'-%d\n' % c, label=b'diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write(b'+%d\n' % c, label=b'diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(b' %d\n' % c)
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
        ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
    if not opts[b'show_revs']:
        return
    for c in revs:
        ui.write(b"%d\n" % c)
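
# Illustrative invocations (the revset expressions are arbitrary; stage
# names match the `stages` table above):
#
#   $ hg debugrevspec -p all 'draft() and ::tip'
#   $ hg debugrevspec --verify-optimized 'heads(all())'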


@command(
    b'debugserve',
    [
        (
            b'',
            b'sshstdio',
            False,
            _(b'run an SSH server bound to process handles'),
        ),
        (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
        (b'', b'logiofile', b'', _(b'file to log server I/O to')),
    ],
    b'',
)
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts[b'sshstdio']:
        raise error.Abort(_(b'only --sshstdio is currently supported'))

    logfh = None

    if opts[b'logiofd'] and opts[b'logiofile']:
        raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))

    if opts[b'logiofd']:
        # Ideally we would be line buffered. But line buffering in binary
        # mode isn't supported and emits a warning in Python 3.8+. Disabling
        # buffering could have performance impacts. But since this isn't
        # performance critical code, it should be fine.
        try:
            logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
    elif opts[b'logiofile']:
        logfh = open(opts[b'logiofile'], b'ab', 0)

    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()
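
# Illustrative invocation (the log file name is arbitrary):
#
#   $ hg debugserve --sshstdio --logiofile server-io.log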


@command(b'debugsetparents', [], _(b'REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory (DANGEROUS)

    This command is not what you are looking for and should not be used. Using
    this command will most certainly result in slight corruption of the file
    level histories within your repository. DO NOT USE THIS COMMAND.

    The command updates the p1 and p2 fields in the dirstate, without touching
    anything else. This is useful for writing repository conversion tools, but
    should be used with extreme care. For example, neither the working
    directory contents nor the file entries in the dirstate are updated, so
    file status may be incorrect after running this command. Only use it if
    you are one of the few people that deeply understand both conversion tools
    and file level histories. If you are reading this help, you are not one of
    those people (most of them sailed west from Mithlond anyway).

    So one last time: DO NOT USE THIS COMMAND.

    Returns 0 on success.
    """

    node1 = scmutil.revsingle(repo, rev1).node()
    node2 = scmutil.revsingle(repo, rev2, b'null').node()

    with repo.wlock():
        repo.setparents(node1, node2)


@command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugsidedata(ui, repo, file_, rev=None, **opts):
    """dump the side data for a cl/manifest/file revision

    Use --verbose to dump the sidedata content."""
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    r = getattr(r, '_revlog', r)
    try:
        sidedata = r.sidedata(r.lookup(rev))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
    if sidedata:
        sidedata = list(sidedata.items())
        sidedata.sort()
        ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
        for key, value in sidedata:
            ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
            if ui.verbose:
                ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
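
# Illustrative invocations (per the synopsis, -c/-m select the changelog or
# manifest; otherwise a tracked file path is expected):
#
#   $ hg debugsidedata -c 0
#   $ hg debugsidedata path/to/file tip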


@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    """test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    """
    if not pycompat.iswindows:
        raise error.Abort(
            _(b'certificate chain building is only possible on Windows')
        )

    if not source:
        if not repo:
            raise error.Abort(
                _(
                    b"there is no Mercurial repository here, and no "
                    b"server specified"
                )
            )
        source = b"default"

    source, branches = urlutil.get_unique_pull_path(
        b'debugssl', repo, ui, source
    )
    url = urlutil.url(source)

    defaultport = {b'https': 443, b'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_(b"malformed port number in URL"))
    else:
        raise error.Abort(_(b"only https and ssh connections are supported"))

    from . import win32

    s = ssl.wrap_socket(
        socket.socket(),
        ssl_version=ssl.PROTOCOL_TLS,
        cert_reqs=ssl.CERT_NONE,
        ca_certs=None,
    )

    try:
        s.connect(addr)
        cert = s.getpeercert(True)

        ui.status(_(b'checking the certificate chain for %s\n') % url.host)

        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_(b'certificate chain is incomplete, updating... '))

            if not win32.checkcertificatechain(cert):
                ui.status(_(b'failed.\n'))
            else:
                ui.status(_(b'done.\n'))
        else:
            ui.status(_(b'full certificate chain is available\n'))
    finally:
        s.close()
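
# Illustrative invocations (Windows only; the URL is hypothetical):
#
#   $ hg debugssl                             # use the 'default' path
#   $ hg debugssl https://example.com/repo    # test a specific server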


@command(
    b"debugbackupbundle",
    [
        (
            b"",
            b"recover",
            b"",
            b"brings the specified changeset back into the repository",
        )
    ]
    + cmdutil.logopts,
    _(b"hg debugbackupbundle [--recover HASH]"),
)
def debugbackupbundle(ui, repo, *pats, **opts):
    """lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in each
    backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for that
    backup.
    """
    backups = list(
        filter(
            os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
        )
    )
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    opts = pycompat.byteskwargs(opts)
    opts[b"bundle"] = b""
    opts[b"force"] = None
    limit = logcmdutil.getlimit(opts)

    def display(other, chlist, displayer):
        if opts.get(b"newest_first"):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                True for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b"no_merges") and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get(b"recover")
    if recovernode:
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_(b"%s already exists in the repo\n") % recovernode)
            return
    elif backups:
        msg = _(
            b"Recover changesets using: hg debugbackupbundle --recover "
            b"<changeset hash>\n\nAvailable backup changesets:"
        )
        ui.status(msg, label=b"status.removed")
    else:
        ui.status(_(b"no backup changesets found\n"))
        return

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = os.path.relpath(backup, encoding.getcwd())
        source, branches = urlutil.get_unique_pull_path(
            b'debugbackupbundle',
            repo,
            ui,
            source,
            default_branches=opts.get(b'branch'),
        )
        try:
            other = hg.peer(repo, opts, source)
        except error.LookupError as ex:
            msg = _(b"\nwarning: unable to open bundle %s") % source
            hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg, hint=hint)
            continue
        revs, checkout = hg.addbranchrevs(
            repo, other, branches, opts.get(b"rev")
        )

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        with ui.silent():
            try:
                other, chlist, cleanupfn = bundlerepo.getremotechanges(
                    ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
                )
            except error.LookupError:
                continue

        try:
            if not chlist:
                continue
            if recovernode:
                with repo.lock(), repo.transaction(b"unbundle") as tr:
                    if scmutil.isrevsymbol(other, recovernode):
                        ui.status(_(b"Unbundling %s\n") % (recovernode))
                        f = hg.openpath(ui, source)
                        gen = exchange.readbundle(ui, f, source)
                        if isinstance(gen, bundle2.unbundle20):
                            bundle2.applybundle(
                                repo,
                                gen,
                                tr,
                                source=b"unbundle",
                                url=b"bundle:" + source,
                            )
                        else:
                            gen.apply(repo, b"unbundle", b"bundle:" + source)
                        break
            else:
                backupdate = encoding.strtolocal(
                    time.strftime(
                        "%a %H:%M, %Y-%m-%d",
                        time.localtime(os.path.getmtime(source)),
                    )
                )
                ui.status(b"\n%s\n" % (backupdate.ljust(50)))
                if ui.verbose:
                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
                else:
                    opts[
3948 b"template"
3945 b"template"
3949 ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
3946 ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
3950 displayer = logcmdutil.changesetdisplayer(
3947 displayer = logcmdutil.changesetdisplayer(
3951 ui, other, opts, False
3948 ui, other, opts, False
3952 )
3949 )
3953 display(other, chlist, displayer)
3950 display(other, chlist, displayer)
3954 displayer.close()
3951 displayer.close()
3955 finally:
3952 finally:
3956 cleanupfn()
3953 cleanupfn()
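
# An illustrative session (hypothetical hash and description): after an
# `hg strip`, the stripped changesets live in .hg/strip-backup/*.hg and
# can be listed and restored with the messages emitted above:
#
#   $ hg debugbackupbundle
#   Recover changesets using: hg debugbackupbundle --recover <changeset hash>
#
#   Available backup changesets:
#   1a2b3c4d5e6f stripped commit description
#   $ hg debugbackupbundle --recover 1a2b3c4d5e6f
#   Unbundling 1a2b3c4d5e6f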


@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    ctx = scmutil.revsingle(repo, rev, None)
    for k, v in sorted(ctx.substate.items()):
        ui.writenoi18n(b'path %s\n' % k)
        ui.writenoi18n(b' source   %s\n' % v[0])
        ui.writenoi18n(b' revision %s\n' % v[1])


@command(b'debugshell', optionalrepo=True)
def debugshell(ui, repo):
    """run an interactive Python interpreter

    The local namespace is provided with a reference to the ui and
    the repo instance (if available).
    """
    import code

    imported_objects = {
        'ui': ui,
        'repo': repo,
    }

    code.interact(local=imported_objects)
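
# A sketch of an interactive session (path and revision count are
# hypothetical); `ui` and `repo` are pre-bound in the REPL namespace:
#
#   $ hg debugshell
#   >>> repo.root
#   b'/path/to/repo'
#   >>> len(repo)
#   42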


@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only, unless closest
    successors sets are requested (--closest).

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors is called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    ctx2str = bytes
    node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % ctx2str(ctx))
        for succsset in obsutil.successorssets(
            repo, ctx.node(), closest=opts['closest'], cache=cache
        ):
            if succsset:
                ui.write(b'    ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(b' ')
                    ui.write(node2str(node))
            ui.write(b'\n')


@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    flog = repo.file(b'.hgtags')
    for r in repo:
        node = repo[r].node()
        tagsnode = cache.getfnode(node, computemissing=False)
        if tagsnode:
            tagsnodedisplay = hex(tagsnode)
            if not flog.hasnode(tagsnode):
                tagsnodedisplay += b' (unknown node)'
        elif tagsnode is None:
            tagsnodedisplay = b'missing'
        else:
            tagsnodedisplay = b'invalid'

        ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))
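
# Each output line follows the b'%d %s %s\n' format above: the revision
# number, the full changeset hash, and either the cached .hgtags filenode
# (flagged b' (unknown node)' when the filelog lacks it), b'missing' when
# nothing is cached for that revision, or b'invalid' otherwise.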


@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = scmutil.revrange(repo, opts['rev'])

    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split(b'=', 1))
            if not k or k == b'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_(b'malformed keyword definition: %s') % d)

    if ui.verbose:
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
            )

    if revs is None:
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
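
# Example invocations (a sketch; output depends on the repository):
#
#   $ hg debugtemplate -D word=hello '{word}\n'   # generic template with -D
#   hello
#   $ hg debugtemplate -r . '{node|short} {desc|firstline}\n'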


@command(
    b'debuguigetpass',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    if r is None:
        r = b"<default response>"
    ui.writenoi18n(b'response: %s\n' % r)


@command(
    b'debuguiprompt',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    r = ui.prompt(prompt)
    ui.writenoi18n(b'response: %s\n' % r)


@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    with repo.wlock(), repo.lock():
        repo.updatecaches(caches=repository.CACHES_ALL)


@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
        (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlogs will be upgraded. You can restrict this using flags
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlogs but the manifest
    * `--changelog`: only optimize the changelog
    * `--no-changelog --no-manifest`: only optimize filelogs
    * `--filelogs`: only optimize the filelogs
    * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
    """
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=set(optimize), backup=backup, **opts
    )


@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    f = lambda fn: fn
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        f = lambda fn: util.normpath(fn)
    fmt = b'f  %%-%ds  %%-%ds  %%s' % (
        max([len(abs) for abs in items]),
        max([len(repo.pathto(abs)) for abs in items]),
    )
    for abs in items:
        line = fmt % (
            abs,
            f(repo.pathto(abs)),
            m.exact(abs) and b'exact' or b'',
        )
        ui.write(b"%s\n" % line.rstrip())


@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        dnodes = b''
        if entry.get(b'divergentnodes'):
            dnodes = (
                b' '.join(
                    b'%s (%s)' % (ctx.hex(), ctx.phasestr())
                    for ctx in entry[b'divergentnodes']
                )
                + b' '
            )
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )


@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    try:
        for opt in cmdutil.remoteopts:
            del opts[opt[1]]
        args = {}
        for k, v in pycompat.iteritems(opts):
            if v:
                args[k] = v
        args = pycompat.strkwargs(args)
        # run twice to check that we don't mess up the stream for the next command
        res1 = repo.debugwireargs(*vals, **args)
        res2 = repo.debugwireargs(*vals, **args)
        ui.write(b"%s\n" % res1)
        if res1 != res2:
            ui.warn(b"%s\n" % res2)
    finally:
        repo.close()


def _parsewirelangblocks(fh):
    activeaction = None
    blocklines = []
    lastindent = 0

    for line in fh:
        line = line.rstrip()
        if not line:
            continue

        if line.startswith(b'#'):
            continue

        if not line.startswith(b' '):
            # New block. Flush previous one.
            if activeaction:
                yield activeaction, blocklines

            activeaction = line
            blocklines = []
            lastindent = 0
            continue

        # Else we start with an indent.

        if not activeaction:
            raise error.Abort(_(b'indented line outside of block'))

        indent = len(line) - len(line.lstrip())

        # If this line is indented more than the last line, concatenate it.
        if indent > lastindent and blocklines:
            blocklines[-1] += line.lstrip()
        else:
            blocklines.append(line)
            lastindent = indent

    # Flush last block.
    if activeaction:
        yield activeaction, blocklines
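
# A sketch of the input this parser consumes (see debugwireproto below for
# the full action reference). An unindented line opens a block; indented
# lines form its payload; a line indented deeper than its predecessor is
# treated as a continuation and concatenated onto it:
#
#   command listkeys
#       namespace bookmarks
#
# yields the pair (b'command listkeys', [b'    namespace bookmarks']);
# consumers lstrip the payload lines before splitting key from value.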


@command(
    b'debugwireproto',
    [
        (b'', b'localssh', False, _(b'start an SSH server for this repo')),
        (b'', b'peer', b'', _(b'construct a specific version of the peer')),
        (
            b'',
            b'noreadstderr',
            False,
            _(b'do not read from stderr of the remote'),
        ),
        (
            b'',
            b'nologhandshake',
            False,
            _(b'do not log I/O related to the peer handshake'),
        ),
    ]
    + cmdutil.remoteopts,
    _(b'[PATH]'),
    optionalrepo=True,
)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
    raw data payloads and don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is specified via stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.

    raw+
    ----

    Behaves like ``raw`` except flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

        command listkeys
            namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` N bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` N bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit a *Unified Frame-Based Protocol* by using special
    syntax.

    A frame is composed as a type, flags, and payload. These can be parsed
    from a string of the form::

        <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
    """
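    # A sketch of a complete script this command accepts on stdin
    # (hypothetical commands; see the docstring above for the grammar):
    #
    #   batchbegin
    #   command heads
    #   command listkeys
    #       namespace bookmarks
    #   batchsubmit
    #   close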
    opts = pycompat.byteskwargs(opts)

    if opts[b'localssh'] and not repo:
        raise error.Abort(_(b'--localssh requires a repository'))

    if opts[b'peer'] and opts[b'peer'] not in (
        b'raw',
        b'http2',
        b'ssh1',
        b'ssh2',
    ):
        raise error.Abort(
            _(b'invalid value for --peer'),
            hint=_(b'valid values are "raw", "http2", "ssh1", and "ssh2"'),
        )

    if path and opts[b'localssh']:
        raise error.Abort(_(b'cannot specify --localssh with an explicit path'))

    if ui.interactive():
        ui.write(_(b'(waiting for commands on stdin)\n'))

    blocks = list(_parsewirelangblocks(ui.fin))

    proc = None
    stdin = None
    stdout = None
    stderr = None
    opener = None

    if opts[b'localssh']:
        # We start the SSH server in its own process so there is process
        # separation. This prevents a whole class of potential bugs around
        # shared state from interfering with server operation.
        args = procutil.hgcmd() + [
            b'-R',
            repo.root,
            b'debugserve',
            b'--sshstdio',
        ]
        proc = subprocess.Popen(
            pycompat.rapply(procutil.tonativestr, args),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            bufsize=0,
        )

        stdin = proc.stdin
        stdout = proc.stdout
        stderr = proc.stderr

        # We turn the pipes into observers so we can log I/O.
        if ui.verbose or opts[b'peer'] == b'raw':
            stdin = util.makeloggingfileobject(
                ui, proc.stdin, b'i', logdata=True
            )
            stdout = util.makeloggingfileobject(
                ui, proc.stdout, b'o', logdata=True
            )
            stderr = util.makeloggingfileobject(
                ui, proc.stderr, b'e', logdata=True
            )

        # --localssh also implies the peer connection settings.

        url = b'ssh://localserver'
        autoreadstderr = not opts[b'noreadstderr']

        if opts[b'peer'] == b'ssh1':
            ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
            peer = sshpeer.sshv1peer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                None,
                autoreadstderr=autoreadstderr,
            )
        elif opts[b'peer'] == b'ssh2':
            ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
            peer = sshpeer.sshv2peer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                None,
                autoreadstderr=autoreadstderr,
            )
        elif opts[b'peer'] == b'raw':
            ui.write(_(b'using raw connection to peer\n'))
            peer = None
        else:
            ui.write(_(b'creating ssh peer from handshake results\n'))
            peer = sshpeer.makepeer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                autoreadstderr=autoreadstderr,
            )

    elif path:
        # We bypass hg.peer() so we can proxy the sockets.
        # TODO consider not doing this because we skip
        # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
        u = urlutil.url(path)
        if u.scheme != b'http':
            raise error.Abort(_(b'only http:// paths are currently supported'))

        url, authinfo = u.authinfo()
        openerargs = {
            'useragent': b'Mercurial debugwireproto',
        }

        # Turn pipes/sockets into observers so we can log I/O.
        if ui.verbose:
            openerargs.update(
                {
                    'loggingfh': ui,
                    'loggingname': b's',
                    'loggingopts': {
                        'logdata': True,
                        'logdataapis': False,
                    },
                }
            )

        if ui.debugflag:
            openerargs['loggingopts']['logdataapis'] = True

        # Don't send default headers when in raw mode. This allows us to
        # bypass most of the behavior of our URL handling code so we can
        # have near complete control over what's sent on the wire.
        if opts[b'peer'] == b'raw':
            openerargs['sendaccept'] = False

        opener = urlmod.opener(ui, authinfo, **openerargs)

        if opts[b'peer'] == b'http2':
            ui.write(_(b'creating http peer for wire protocol version 2\n'))
            # We go through makepeer() because we need an API descriptor for
            # the peer instance to be useful.
            maybe_silent = (
                ui.silent()
                if opts[b'nologhandshake']
                else util.nullcontextmanager()
            )
            with maybe_silent, ui.configoverride(
                {(b'experimental', b'httppeer.advertise-v2'): True}
            ):
                peer = httppeer.makepeer(ui, path, opener=opener)

            if not isinstance(peer, httppeer.httpv2peer):
                raise error.Abort(
                    _(
                        b'could not instantiate HTTP peer for '
                        b'wire protocol version 2'
                    ),
                    hint=_(
                        b'the server may not have the feature '
                        b'enabled or is not allowing this '
                        b'client version'
                    ),
                )

        elif opts[b'peer'] == b'raw':
            ui.write(_(b'using raw connection to peer\n'))
            peer = None
        elif opts[b'peer']:
            raise error.Abort(
                _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
            )
        else:
            peer = httppeer.makepeer(ui, path, opener=opener)

        # We /could/ populate stdin/stdout with sock.makefile()...
    else:
        raise error.Abort(_(b'unsupported connection configuration'))

    batchedcommands = None

    # Now perform actions based on the parsed wire language instructions.
    for action, lines in blocks:
        if action in (b'raw', b'raw+'):
            if not stdin:
                raise error.Abort(_(b'cannot call raw/raw+ on this peer'))

            # Concatenate the data together.
            data = b''.join(l.lstrip() for l in lines)
            data = stringutil.unescapestr(data)
            stdin.write(data)

            if action == b'raw+':
                stdin.flush()
        elif action == b'flush':
            if not stdin:
                raise error.Abort(_(b'cannot call flush on this peer'))
            stdin.flush()
        elif action.startswith(b'command'):
            if not peer:
                raise error.Abort(
                    _(
                        b'cannot send commands unless peer instance '
                        b'is available'
                    )
                )

            command = action.split(b' ', 1)[1]

            args = {}
            for line in lines:
                # We need to allow empty values.
                fields = line.lstrip().split(b' ', 1)
                if len(fields) == 1:
                    key = fields[0]
                    value = b''
                else:
                    key, value = fields

                if value.startswith(b'eval:'):
                    value = stringutil.evalpythonliteral(value[5:])
                else:
                    value = stringutil.unescapestr(value)

                args[key] = value

            if batchedcommands is not None:
                batchedcommands.append((command, args))
                continue

            ui.status(_(b'sending %s command\n') % command)

            if b'PUSHFILE' in args:
                with open(args[b'PUSHFILE'], 'rb') as fh:
                    del args[b'PUSHFILE']
                    res, output = peer._callpush(
                        command, fh, **pycompat.strkwargs(args)
                    )
                    ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
                    ui.status(
                        _(b'remote output: %s\n') % stringutil.escapestr(output)
                    )
            else:
                with peer.commandexecutor() as e:
                    res = e.callcommand(command, args).result()

                if isinstance(res, wireprotov2peer.commandresponse):
                    val = res.objects()
                    ui.status(
                        _(b'response: %s\n')
                        % stringutil.pprint(val, bprefix=True, indent=2)
                    )
                else:
                    ui.status(
                        _(b'response: %s\n')
                        % stringutil.pprint(res, bprefix=True, indent=2)
                    )

        elif action == b'batchbegin':
            if batchedcommands is not None:
                raise error.Abort(_(b'nested batchbegin not allowed'))

            batchedcommands = []
        elif action == b'batchsubmit':
            # There is a batching API we could go through. But it would be
            # difficult to normalize requests into function calls. It is easier
            # to bypass this layer and normalize to commands + args.
            ui.status(
                _(b'sending batch with %d sub-commands\n')
                % len(batchedcommands)
            )
            assert peer is not None
            for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                ui.status(
                    _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
                )

            batchedcommands = None

        elif action.startswith(b'httprequest '):
            if not opener:
                raise error.Abort(
                    _(b'cannot use httprequest without an HTTP peer')
                )

            request = action.split(b' ', 2)
            if len(request) != 3:
                raise error.Abort(
                    _(
                        b'invalid httprequest: expected format is '
                        b'"httprequest <method> <path>"'
                    )
                )

            method, httppath = request[1:]
            headers = {}
            body = None
            frames = []
            for line in lines:
                line = line.lstrip()
                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                if m:
                    # Headers need to use native strings.
                    key = pycompat.strurl(m.group(1))
                    value = pycompat.strurl(m.group(2))
                    headers[key] = value
                    continue

                if line.startswith(b'BODYFILE '):
                    with open(line.split(b' ', 1)[1], b'rb') as fh:
                        body = fh.read()
                elif line.startswith(b'frame '):
                    frame = wireprotoframing.makeframefromhumanstring(
                        line[len(b'frame ') :]
                    )

                    frames.append(frame)
                else:
                    raise error.Abort(
                        _(b'unknown argument to httprequest: %s') % line
                    )

            url = path + httppath

            if frames:
                body = b''.join(bytes(f) for f in frames)

            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)

            # urllib.Request insists on using has_data() as a proxy for
            # determining the request method. Override that to use our
            # explicitly requested method.
            req.get_method = lambda: pycompat.sysstr(method)

            try:
                res = opener.open(req)
                body = res.read()
            except util.urlerr.urlerror as e:
                # read() method must be called, but only exists in Python 2
                getattr(e, 'read', lambda: None)()
4639 ui.write(_(b'creating ssh peer from handshake results\n'))
4636 ui.write(_(b'creating ssh peer from handshake results\n'))
4640 peer = sshpeer.makepeer(
4637 peer = sshpeer.makepeer(
4641 ui,
4638 ui,
4642 url,
4639 url,
4643 proc,
4640 proc,
4644 stdin,
4641 stdin,
4645 stdout,
4642 stdout,
4646 stderr,
4643 stderr,
4647 autoreadstderr=autoreadstderr,
4644 autoreadstderr=autoreadstderr,
4648 )
4645 )
4649
4646
4650 elif path:
4647 elif path:
4651 # We bypass hg.peer() so we can proxy the sockets.
4648 # We bypass hg.peer() so we can proxy the sockets.
4652 # TODO consider not doing this because we skip
4649 # TODO consider not doing this because we skip
4653 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4650 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4654 u = urlutil.url(path)
4651 u = urlutil.url(path)
4655 if u.scheme != b'http':
4652 if u.scheme != b'http':
4656 raise error.Abort(_(b'only http:// paths are currently supported'))
4653 raise error.Abort(_(b'only http:// paths are currently supported'))
4657
4654
4658 url, authinfo = u.authinfo()
4655 url, authinfo = u.authinfo()
4659 openerargs = {
4656 openerargs = {
4660 'useragent': b'Mercurial debugwireproto',
4657 'useragent': b'Mercurial debugwireproto',
4661 }
4658 }
4662
4659
4663 # Turn pipes/sockets into observers so we can log I/O.
4660 # Turn pipes/sockets into observers so we can log I/O.
4664 if ui.verbose:
4661 if ui.verbose:
4665 openerargs.update(
4662 openerargs.update(
4666 {
4663 {
4667 'loggingfh': ui,
4664 'loggingfh': ui,
4668 'loggingname': b's',
4665 'loggingname': b's',
4669 'loggingopts': {
4666 'loggingopts': {
4670 'logdata': True,
4667 'logdata': True,
4671 'logdataapis': False,
4668 'logdataapis': False,
4672 },
4669 },
4673 }
4670 }
4674 )
4671 )
4675
4672
4676 if ui.debugflag:
4673 if ui.debugflag:
4677 openerargs['loggingopts']['logdataapis'] = True
4674 openerargs['loggingopts']['logdataapis'] = True
4678
4675
4679 # Don't send default headers when in raw mode. This allows us to
4676 # Don't send default headers when in raw mode. This allows us to
4680 # bypass most of the behavior of our URL handling code so we can
4677 # bypass most of the behavior of our URL handling code so we can
4681 # have near complete control over what's sent on the wire.
4678 # have near complete control over what's sent on the wire.
4682 if opts[b'peer'] == b'raw':
4679 if opts[b'peer'] == b'raw':
4683 openerargs['sendaccept'] = False
4680 openerargs['sendaccept'] = False
4684
4681
4685 opener = urlmod.opener(ui, authinfo, **openerargs)
4682 opener = urlmod.opener(ui, authinfo, **openerargs)
4686
4683
4687 if opts[b'peer'] == b'http2':
4684 if opts[b'peer'] == b'http2':
4688 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4685 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4689 # We go through makepeer() because we need an API descriptor for
4686 # We go through makepeer() because we need an API descriptor for
4690 # the peer instance to be useful.
4687 # the peer instance to be useful.
4691 maybe_silent = (
4688 maybe_silent = (
4692 ui.silent()
4689 ui.silent()
4693 if opts[b'nologhandshake']
4690 if opts[b'nologhandshake']
4694 else util.nullcontextmanager()
4691 else util.nullcontextmanager()
4695 )
4692 )
4696 with maybe_silent, ui.configoverride(
4693 with maybe_silent, ui.configoverride(
4697 {(b'experimental', b'httppeer.advertise-v2'): True}
4694 {(b'experimental', b'httppeer.advertise-v2'): True}
4698 ):
4695 ):
4699 peer = httppeer.makepeer(ui, path, opener=opener)
4696 peer = httppeer.makepeer(ui, path, opener=opener)
4700
4697
4701 if not isinstance(peer, httppeer.httpv2peer):
4698 if not isinstance(peer, httppeer.httpv2peer):
4702 raise error.Abort(
4699 raise error.Abort(
4703 _(
4700 _(
4704 b'could not instantiate HTTP peer for '
4701 b'could not instantiate HTTP peer for '
4705 b'wire protocol version 2'
4702 b'wire protocol version 2'
4706 ),
4703 ),
4707 hint=_(
4704 hint=_(
4708 b'the server may not have the feature '
4705 b'the server may not have the feature '
4709 b'enabled or is not allowing this '
4706 b'enabled or is not allowing this '
4710 b'client version'
4707 b'client version'
4711 ),
4708 ),
4712 )
4709 )
4713
4710
4714 elif opts[b'peer'] == b'raw':
4711 elif opts[b'peer'] == b'raw':
4715 ui.write(_(b'using raw connection to peer\n'))
4712 ui.write(_(b'using raw connection to peer\n'))
4716 peer = None
4713 peer = None
4717 elif opts[b'peer']:
4714 elif opts[b'peer']:
4718 raise error.Abort(
4715 raise error.Abort(
4719 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4716 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4720 )
4717 )
4721 else:
4718 else:
4722 peer = httppeer.makepeer(ui, path, opener=opener)
4719 peer = httppeer.makepeer(ui, path, opener=opener)
4723
4720
4724 # We /could/ populate stdin/stdout with sock.makefile()...
4721 # We /could/ populate stdin/stdout with sock.makefile()...
4725 else:
4722 else:
4726 raise error.Abort(_(b'unsupported connection configuration'))
4723 raise error.Abort(_(b'unsupported connection configuration'))
4727
4724
4728 batchedcommands = None
4725 batchedcommands = None
4729
4726
4730 # Now perform actions based on the parsed wire language instructions.
4727 # Now perform actions based on the parsed wire language instructions.
4731 for action, lines in blocks:
4728 for action, lines in blocks:
4732 if action in (b'raw', b'raw+'):
4729 if action in (b'raw', b'raw+'):
4733 if not stdin:
4730 if not stdin:
4734 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4731 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4735
4732
4736 # Concatenate the data together.
4733 # Concatenate the data together.
4737 data = b''.join(l.lstrip() for l in lines)
4734 data = b''.join(l.lstrip() for l in lines)
4738 data = stringutil.unescapestr(data)
4735 data = stringutil.unescapestr(data)
4739 stdin.write(data)
4736 stdin.write(data)
4740
4737
4741 if action == b'raw+':
4738 if action == b'raw+':
4742 stdin.flush()
4739 stdin.flush()
4743 elif action == b'flush':
4740 elif action == b'flush':
4744 if not stdin:
4741 if not stdin:
4745 raise error.Abort(_(b'cannot call flush on this peer'))
4742 raise error.Abort(_(b'cannot call flush on this peer'))
4746 stdin.flush()
4743 stdin.flush()
4747 elif action.startswith(b'command'):
4744 elif action.startswith(b'command'):
4748 if not peer:
4745 if not peer:
4749 raise error.Abort(
4746 raise error.Abort(
4750 _(
4747 _(
4751 b'cannot send commands unless peer instance '
4748 b'cannot send commands unless peer instance '
4752 b'is available'
4749 b'is available'
4753 )
4750 )
4754 )
4751 )
4755
4752
4756 command = action.split(b' ', 1)[1]
4753 command = action.split(b' ', 1)[1]
4757
4754
4758 args = {}
4755 args = {}
4759 for line in lines:
4756 for line in lines:
4760 # We need to allow empty values.
4757 # We need to allow empty values.
4761 fields = line.lstrip().split(b' ', 1)
4758 fields = line.lstrip().split(b' ', 1)
4762 if len(fields) == 1:
4759 if len(fields) == 1:
4763 key = fields[0]
4760 key = fields[0]
4764 value = b''
4761 value = b''
4765 else:
4762 else:
4766 key, value = fields
4763 key, value = fields
4767
4764
4768 if value.startswith(b'eval:'):
4765 if value.startswith(b'eval:'):
4769 value = stringutil.evalpythonliteral(value[5:])
4766 value = stringutil.evalpythonliteral(value[5:])
4770 else:
4767 else:
4771 value = stringutil.unescapestr(value)
4768 value = stringutil.unescapestr(value)
4772
4769
4773 args[key] = value
4770 args[key] = value
4774
4771
4775 if batchedcommands is not None:
4772 if batchedcommands is not None:
4776 batchedcommands.append((command, args))
4773 batchedcommands.append((command, args))
4777 continue
4774 continue
4778
4775
4779 ui.status(_(b'sending %s command\n') % command)
4776 ui.status(_(b'sending %s command\n') % command)
4780
4777
4781 if b'PUSHFILE' in args:
4778 if b'PUSHFILE' in args:
4782 with open(args[b'PUSHFILE'], 'rb') as fh:
4779 with open(args[b'PUSHFILE'], 'rb') as fh:
4783 del args[b'PUSHFILE']
4780 del args[b'PUSHFILE']
4784 res, output = peer._callpush(
4781 res, output = peer._callpush(
4785 command, fh, **pycompat.strkwargs(args)
4782 command, fh, **pycompat.strkwargs(args)
4786 )
4783 )
4787 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4784 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4788 ui.status(
4785 ui.status(
4789 _(b'remote output: %s\n') % stringutil.escapestr(output)
4786 _(b'remote output: %s\n') % stringutil.escapestr(output)
4790 )
4787 )
4791 else:
4788 else:
4792 with peer.commandexecutor() as e:
4789 with peer.commandexecutor() as e:
4793 res = e.callcommand(command, args).result()
4790 res = e.callcommand(command, args).result()
4794
4791
4795 if isinstance(res, wireprotov2peer.commandresponse):
4792 if isinstance(res, wireprotov2peer.commandresponse):
4796 val = res.objects()
4793 val = res.objects()
4797 ui.status(
4794 ui.status(
4798 _(b'response: %s\n')
4795 _(b'response: %s\n')
4799 % stringutil.pprint(val, bprefix=True, indent=2)
4796 % stringutil.pprint(val, bprefix=True, indent=2)
4800 )
4797 )
4801 else:
4798 else:
4802 ui.status(
4799 ui.status(
4803 _(b'response: %s\n')
4800 _(b'response: %s\n')
4804 % stringutil.pprint(res, bprefix=True, indent=2)
4801 % stringutil.pprint(res, bprefix=True, indent=2)
4805 )
4802 )
4806
4803
4807 elif action == b'batchbegin':
4804 elif action == b'batchbegin':
4808 if batchedcommands is not None:
4805 if batchedcommands is not None:
4809 raise error.Abort(_(b'nested batchbegin not allowed'))
4806 raise error.Abort(_(b'nested batchbegin not allowed'))
4810
4807
4811 batchedcommands = []
4808 batchedcommands = []
4812 elif action == b'batchsubmit':
4809 elif action == b'batchsubmit':
4813 # There is a batching API we could go through. But it would be
4810 # There is a batching API we could go through. But it would be
4814 # difficult to normalize requests into function calls. It is easier
4811 # difficult to normalize requests into function calls. It is easier
4815 # to bypass this layer and normalize to commands + args.
4812 # to bypass this layer and normalize to commands + args.
4816 ui.status(
4813 ui.status(
4817 _(b'sending batch with %d sub-commands\n')
4814 _(b'sending batch with %d sub-commands\n')
4818 % len(batchedcommands)
4815 % len(batchedcommands)
4819 )
4816 )
4820 assert peer is not None
4817 assert peer is not None
4821 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4818 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4822 ui.status(
4819 ui.status(
4823 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4820 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4824 )
4821 )
4825
4822
4826 batchedcommands = None
4823 batchedcommands = None
4827
4824
4828 elif action.startswith(b'httprequest '):
4825 elif action.startswith(b'httprequest '):
4829 if not opener:
4826 if not opener:
4830 raise error.Abort(
4827 raise error.Abort(
4831 _(b'cannot use httprequest without an HTTP peer')
4828 _(b'cannot use httprequest without an HTTP peer')
4832 )
4829 )
4833
4830
4834 request = action.split(b' ', 2)
4831 request = action.split(b' ', 2)
4835 if len(request) != 3:
4832 if len(request) != 3:
4836 raise error.Abort(
4833 raise error.Abort(
4837 _(
4834 _(
4838 b'invalid httprequest: expected format is '
4835 b'invalid httprequest: expected format is '
4839 b'"httprequest <method> <path>'
4836 b'"httprequest <method> <path>'
4840 )
4837 )
4841 )
4838 )
4842
4839
4843 method, httppath = request[1:]
4840 method, httppath = request[1:]
4844 headers = {}
4841 headers = {}
4845 body = None
4842 body = None
4846 frames = []
4843 frames = []
4847 for line in lines:
4844 for line in lines:
4848 line = line.lstrip()
4845 line = line.lstrip()
4849 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4846 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4850 if m:
4847 if m:
4851 # Headers need to use native strings.
4848 # Headers need to use native strings.
4852 key = pycompat.strurl(m.group(1))
4849 key = pycompat.strurl(m.group(1))
4853 value = pycompat.strurl(m.group(2))
4850 value = pycompat.strurl(m.group(2))
4854 headers[key] = value
4851 headers[key] = value
4855 continue
4852 continue
4856
4853
4857 if line.startswith(b'BODYFILE '):
4854 if line.startswith(b'BODYFILE '):
4858 with open(line.split(b' ', 1)[1], b'rb') as fh:
4855 with open(line.split(b' ', 1)[1], b'rb') as fh:
4859 body = fh.read()
4856 body = fh.read()
4860 elif line.startswith(b'frame '):
4857 elif line.startswith(b'frame '):
4861 frame = wireprotoframing.makeframefromhumanstring(
4858 frame = wireprotoframing.makeframefromhumanstring(
4862 line[len(b'frame ') :]
4859 line[len(b'frame ') :]
4863 )
4860 )
4864
4861
4865 frames.append(frame)
4862 frames.append(frame)
4866 else:
4863 else:
4867 raise error.Abort(
4864 raise error.Abort(
4868 _(b'unknown argument to httprequest: %s') % line
4865 _(b'unknown argument to httprequest: %s') % line
4869 )
4866 )
4870
4867
4871 url = path + httppath
4868 url = path + httppath
4872
4869
4873 if frames:
4870 if frames:
4874 body = b''.join(bytes(f) for f in frames)
4871 body = b''.join(bytes(f) for f in frames)
4875
4872
4876 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4873 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4877
4874
4878 # urllib.Request insists on using has_data() as a proxy for
4875 # urllib.Request insists on using has_data() as a proxy for
4879 # determining the request method. Override that to use our
4876 # determining the request method. Override that to use our
4880 # explicitly requested method.
4877 # explicitly requested method.
4881 req.get_method = lambda: pycompat.sysstr(method)
4878 req.get_method = lambda: pycompat.sysstr(method)
4882
4879
4883 try:
4880 try:
4884 res = opener.open(req)
4881 res = opener.open(req)
4885 body = res.read()
4882 body = res.read()
4886 except util.urlerr.urlerror as e:
4883 except util.urlerr.urlerror as e:
4887 # read() method must be called, but only exists in Python 2
4884 # read() method must be called, but only exists in Python 2
4888 getattr(e, 'read', lambda: None)()
4885 getattr(e, 'read', lambda: None)()
4889 continue
4886 continue
4890
4887
4891 ct = res.headers.get('Content-Type')
4888 ct = res.headers.get('Content-Type')
4892 if ct == 'application/mercurial-cbor':
4889 if ct == 'application/mercurial-cbor':
4893 ui.write(
4890 ui.write(
4894 _(b'cbor> %s\n')
4891 _(b'cbor> %s\n')
4895 % stringutil.pprint(
4892 % stringutil.pprint(
4896 cborutil.decodeall(body), bprefix=True, indent=2
4893 cborutil.decodeall(body), bprefix=True, indent=2
4897 )
4894 )
4898 )
4895 )
4899
4896
4900 elif action == b'close':
4897 elif action == b'close':
4901 assert peer is not None
4898 assert peer is not None
4902 peer.close()
4899 peer.close()
4903 elif action == b'readavailable':
4900 elif action == b'readavailable':
4904 if not stdout or not stderr:
4901 if not stdout or not stderr:
4905 raise error.Abort(
4902 raise error.Abort(
4906 _(b'readavailable not available on this peer')
4903 _(b'readavailable not available on this peer')
4907 )
4904 )
4908
4905
4909 stdin.close()
4906 stdin.close()
4910 stdout.read()
4907 stdout.read()
4911 stderr.read()
4908 stderr.read()
4912
4909
4913 elif action == b'readline':
4910 elif action == b'readline':
4914 if not stdout:
4911 if not stdout:
4915 raise error.Abort(_(b'readline not available on this peer'))
4912 raise error.Abort(_(b'readline not available on this peer'))
4916 stdout.readline()
4913 stdout.readline()
4917 elif action == b'ereadline':
4914 elif action == b'ereadline':
4918 if not stderr:
4915 if not stderr:
4919 raise error.Abort(_(b'ereadline not available on this peer'))
4916 raise error.Abort(_(b'ereadline not available on this peer'))
4920 stderr.readline()
4917 stderr.readline()
4921 elif action.startswith(b'read '):
4918 elif action.startswith(b'read '):
4922 count = int(action.split(b' ', 1)[1])
4919 count = int(action.split(b' ', 1)[1])
4923 if not stdout:
4920 if not stdout:
4924 raise error.Abort(_(b'read not available on this peer'))
4921 raise error.Abort(_(b'read not available on this peer'))
4925 stdout.read(count)
4922 stdout.read(count)
4926 elif action.startswith(b'eread '):
4923 elif action.startswith(b'eread '):
4927 count = int(action.split(b' ', 1)[1])
4924 count = int(action.split(b' ', 1)[1])
4928 if not stderr:
4925 if not stderr:
4929 raise error.Abort(_(b'eread not available on this peer'))
4926 raise error.Abort(_(b'eread not available on this peer'))
4930 stderr.read(count)
4927 stderr.read(count)
4931 else:
4928 else:
4932 raise error.Abort(_(b'unknown action: %s') % action)
4929 raise error.Abort(_(b'unknown action: %s') % action)
4933
4930
4934 if batchedcommands is not None:
4931 if batchedcommands is not None:
4935 raise error.Abort(_(b'unclosed "batchbegin" request'))
4932 raise error.Abort(_(b'unclosed "batchbegin" request'))
4936
4933
4937 if peer:
4934 if peer:
4938 peer.close()
4935 peer.close()
4939
4936
4940 if proc:
4937 if proc:
4941 proc.kill()
4938 proc.kill()
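
A minimal standalone sketch of how the `key value` argument lines above are interpreted, assuming `ast.literal_eval` as a stand-in for `stringutil.evalpythonliteral` and a unicode-escape round-trip for `stringutil.unescapestr`; the helper name `parsearg` is illustrative only:

    import ast

    def parsearg(line):
        # A lone key means an empty value; otherwise split on the first space.
        fields = line.split(b' ', 1)
        if len(fields) == 1:
            key, value = fields[0], b''
        else:
            key, value = fields
        if value.startswith(b'eval:'):
            # e.g. b"eval:['a', 'b']" becomes the Python list ['a', 'b']
            value = ast.literal_eval(value[len(b'eval:'):].decode('utf-8'))
        else:
            # e.g. b'foo\\nbar' becomes b'foo\nbar'
            value = value.decode('unicode_escape').encode('latin-1')
        return key, value
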
@@ -1,952 +1,962 b''
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 from .dirstateutils import (
21 from .dirstateutils import (
22 docket as docketmod,
22 docket as docketmod,
23 )
23 )
24
24
25 parsers = policy.importmod('parsers')
25 parsers = policy.importmod('parsers')
26 rustmod = policy.importrust('dirstate')
26 rustmod = policy.importrust('dirstate')
27
27
28 propertycache = util.propertycache
28 propertycache = util.propertycache
29
29
30 DirstateItem = parsers.DirstateItem
30 DirstateItem = parsers.DirstateItem
31
31
32 rangemask = 0x7FFFFFFF
32 rangemask = 0x7FFFFFFF
33
33
34
34
35 class dirstatemap(object):
35 class dirstatemap(object):
36 """Map encapsulating the dirstate's contents.
36 """Map encapsulating the dirstate's contents.
37
37
38 The dirstate contains the following state:
38 The dirstate contains the following state:
39
39
40 - `identity` is the identity of the dirstate file, which can be used to
40 - `identity` is the identity of the dirstate file, which can be used to
41 detect when changes have occurred to the dirstate file.
41 detect when changes have occurred to the dirstate file.
42
42
43 - `parents` is a pair containing the parents of the working copy. The
43 - `parents` is a pair containing the parents of the working copy. The
44 parents are updated by calling `setparents`.
44 parents are updated by calling `setparents`.
45
45
46 - the state map maps filenames to tuples of (state, mode, size, mtime),
46 - the state map maps filenames to tuples of (state, mode, size, mtime),
47 where state is a single character representing 'normal', 'added',
47 where state is a single character representing 'normal', 'added',
48 'removed', or 'merged'. It is read by treating the dirstate as a
48 'removed', or 'merged'. It is read by treating the dirstate as a
49 dict. File state is updated by calling various methods (see each
49 dict. File state is updated by calling various methods (see each
50 documentation for details):
50 documentation for details):
51
51
52 - `reset_state`,
52 - `reset_state`,
53 - `set_tracked`
53 - `set_tracked`
54 - `set_untracked`
54 - `set_untracked`
55 - `set_clean`
55 - `set_clean`
56 - `set_possibly_dirty`
56 - `set_possibly_dirty`
57
57
58 - `copymap` maps destination filenames to their source filename.
58 - `copymap` maps destination filenames to their source filename.
59
59
60 The dirstate also provides the following views onto the state:
60 The dirstate also provides the following views onto the state:
61
61
62 - `nonnormalset` is a set of the filenames that have state other
62 - `nonnormalset` is a set of the filenames that have state other
63 than 'normal', or are normal but have an mtime of -1 ('normallookup').
63 than 'normal', or are normal but have an mtime of -1 ('normallookup').
64
64
65 - `otherparentset` is a set of the filenames that are marked as coming
65 - `otherparentset` is a set of the filenames that are marked as coming
66 from the second parent when the dirstate is currently being merged.
66 from the second parent when the dirstate is currently being merged.
67
67
68 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
68 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
69 form that they appear as in the dirstate.
69 form that they appear as in the dirstate.
70
70
71 - `dirfoldmap` is a dict mapping normalized directory names to the
71 - `dirfoldmap` is a dict mapping normalized directory names to the
72 denormalized form that they appear as in the dirstate.
72 denormalized form that they appear as in the dirstate.
73 """
73 """
74
74
75 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
75 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
76 self._ui = ui
76 self._ui = ui
77 self._opener = opener
77 self._opener = opener
78 self._root = root
78 self._root = root
79 self._filename = b'dirstate'
79 self._filename = b'dirstate'
80 self._nodelen = 20
80 self._nodelen = 20
81 self._nodeconstants = nodeconstants
81 self._nodeconstants = nodeconstants
82 assert (
82 assert (
83 not use_dirstate_v2
83 not use_dirstate_v2
84 ), "should have detected unsupported requirement"
84 ), "should have detected unsupported requirement"
85
85
86 self._parents = None
86 self._parents = None
87 self._dirtyparents = False
87 self._dirtyparents = False
88
88
89 # for consistent view between _pl() and _read() invocations
89 # for consistent view between _pl() and _read() invocations
90 self._pendingmode = None
90 self._pendingmode = None
91
91
92 @propertycache
92 @propertycache
93 def _map(self):
93 def _map(self):
94 self._map = {}
94 self._map = {}
95 self.read()
95 self.read()
96 return self._map
96 return self._map
97
97
98 @propertycache
98 @propertycache
99 def copymap(self):
99 def copymap(self):
100 self.copymap = {}
100 self.copymap = {}
101 self._map
101 self._map
102 return self.copymap
102 return self.copymap
103
103
104 def clear(self):
104 def clear(self):
105 self._map.clear()
105 self._map.clear()
106 self.copymap.clear()
106 self.copymap.clear()
107 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
107 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
108 util.clearcachedproperty(self, b"_dirs")
108 util.clearcachedproperty(self, b"_dirs")
109 util.clearcachedproperty(self, b"_alldirs")
109 util.clearcachedproperty(self, b"_alldirs")
110 util.clearcachedproperty(self, b"filefoldmap")
110 util.clearcachedproperty(self, b"filefoldmap")
111 util.clearcachedproperty(self, b"dirfoldmap")
111 util.clearcachedproperty(self, b"dirfoldmap")
112 util.clearcachedproperty(self, b"nonnormalset")
112 util.clearcachedproperty(self, b"nonnormalset")
113 util.clearcachedproperty(self, b"otherparentset")
113 util.clearcachedproperty(self, b"otherparentset")
114
114
115 def items(self):
115 def items(self):
116 return pycompat.iteritems(self._map)
116 return pycompat.iteritems(self._map)
117
117
118 # forward for Python 2/3 compat
118 # forward for Python 2/3 compat
119 iteritems = items
119 iteritems = items
120
120
121 def debug_iter(self, all):
121 def debug_iter(self, all):
122 """
122 """
123 Return an iterator of (filename, state, mode, size, mtime) tuples
124
123 `all` is unused when Rust is not enabled
125 `all` is unused when Rust is not enabled
124 """
126 """
125 return self.item()
127 for (filename, item) in self.items():
128 yield (filename, item.state, item.mode, item.size, item.mtime)
126
129
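
With this change, callers of `debug_iter` consume plain tuples rather than `DirstateItem` objects. A hedged usage sketch (`dmap` is an illustrative instance):

    for filename, state, mode, size, mtime in dmap.debug_iter(all=False):
        print(filename, state, oct(mode), size, mtime)
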
127 def __len__(self):
130 def __len__(self):
128 return len(self._map)
131 return len(self._map)
129
132
130 def __iter__(self):
133 def __iter__(self):
131 return iter(self._map)
134 return iter(self._map)
132
135
133 def get(self, key, default=None):
136 def get(self, key, default=None):
134 return self._map.get(key, default)
137 return self._map.get(key, default)
135
138
136 def __contains__(self, key):
139 def __contains__(self, key):
137 return key in self._map
140 return key in self._map
138
141
139 def __getitem__(self, key):
142 def __getitem__(self, key):
140 return self._map[key]
143 return self._map[key]
141
144
142 def keys(self):
145 def keys(self):
143 return self._map.keys()
146 return self._map.keys()
144
147
145 def preload(self):
148 def preload(self):
146 """Loads the underlying data, if it's not already loaded"""
149 """Loads the underlying data, if it's not already loaded"""
147 self._map
150 self._map
148
151
149 def _dirs_incr(self, filename, old_entry=None):
152 def _dirs_incr(self, filename, old_entry=None):
150 """incremente the dirstate counter if applicable"""
153 """incremente the dirstate counter if applicable"""
151 if (
154 if (
152 old_entry is None or old_entry.removed
155 old_entry is None or old_entry.removed
153 ) and "_dirs" in self.__dict__:
156 ) and "_dirs" in self.__dict__:
154 self._dirs.addpath(filename)
157 self._dirs.addpath(filename)
155 if old_entry is None and "_alldirs" in self.__dict__:
158 if old_entry is None and "_alldirs" in self.__dict__:
156 self._alldirs.addpath(filename)
159 self._alldirs.addpath(filename)
157
160
158 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
161 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
159 """decremente the dirstate counter if applicable"""
162 """decremente the dirstate counter if applicable"""
160 if old_entry is not None:
163 if old_entry is not None:
161 if "_dirs" in self.__dict__ and not old_entry.removed:
164 if "_dirs" in self.__dict__ and not old_entry.removed:
162 self._dirs.delpath(filename)
165 self._dirs.delpath(filename)
163 if "_alldirs" in self.__dict__ and not remove_variant:
166 if "_alldirs" in self.__dict__ and not remove_variant:
164 self._alldirs.delpath(filename)
167 self._alldirs.delpath(filename)
165 elif remove_variant and "_alldirs" in self.__dict__:
168 elif remove_variant and "_alldirs" in self.__dict__:
166 self._alldirs.addpath(filename)
169 self._alldirs.addpath(filename)
167 if "filefoldmap" in self.__dict__:
170 if "filefoldmap" in self.__dict__:
168 normed = util.normcase(filename)
171 normed = util.normcase(filename)
169 self.filefoldmap.pop(normed, None)
172 self.filefoldmap.pop(normed, None)
170
173
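
`_dirs_incr` and `_dirs_decr` keep per-directory reference counts in step with file additions and removals. A simplified, self-contained sketch of the counting that `pathutil.dirs` is assumed to perform (illustrative, not the real implementation):

    import collections

    class dirs(object):
        def __init__(self):
            self._counts = collections.Counter()

        def addpath(self, path):
            # Bump every ancestor directory of `path`,
            # e.g. b'a/b/c.py' touches b'a/b' and b'a'.
            while b'/' in path:
                path = path.rsplit(b'/', 1)[0]
                self._counts[path] += 1

        def delpath(self, path):
            # Drop counts symmetrically; forget directories that reach zero.
            while b'/' in path:
                path = path.rsplit(b'/', 1)[0]
                self._counts[path] -= 1
                if not self._counts[path]:
                    del self._counts[path]

        def __contains__(self, d):
            return d in self._counts
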
171 def set_possibly_dirty(self, filename):
174 def set_possibly_dirty(self, filename):
172 """record that the current state of the file on disk is unknown"""
175 """record that the current state of the file on disk is unknown"""
173 self[filename].set_possibly_dirty()
176 self[filename].set_possibly_dirty()
174
177
175 def set_clean(self, filename, mode, size, mtime):
178 def set_clean(self, filename, mode, size, mtime):
176 """mark a file as back to a clean state"""
179 """mark a file as back to a clean state"""
177 entry = self[filename]
180 entry = self[filename]
178 mtime = mtime & rangemask
181 mtime = mtime & rangemask
179 size = size & rangemask
182 size = size & rangemask
180 entry.set_clean(mode, size, mtime)
183 entry.set_clean(mode, size, mtime)
181 self.copymap.pop(filename, None)
184 self.copymap.pop(filename, None)
182 self.nonnormalset.discard(filename)
185 self.nonnormalset.discard(filename)
183
186
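
`set_clean` masks size and mtime with `rangemask` (0x7FFFFFFF) so the values stay within 31 bits, which is assumed to match the signed 32-bit fields of the on-disk v1 format. A worked example:

    rangemask = 0x7FFFFFFF
    2500000000 & rangemask  # -> 352516352: oversized mtimes wrap around
    1024 & rangemask        # -> 1024: ordinary values pass through unchanged
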
184 def reset_state(
187 def reset_state(
185 self,
188 self,
186 filename,
189 filename,
187 wc_tracked=False,
190 wc_tracked=False,
188 p1_tracked=False,
191 p1_tracked=False,
189 p2_tracked=False,
192 p2_tracked=False,
190 merged=False,
193 merged=False,
191 clean_p1=False,
194 clean_p1=False,
192 clean_p2=False,
195 clean_p2=False,
193 possibly_dirty=False,
196 possibly_dirty=False,
194 parentfiledata=None,
197 parentfiledata=None,
195 ):
198 ):
196 """Set a entry to a given state, diregarding all previous state
199 """Set a entry to a given state, diregarding all previous state
197
200
198 This is to be used by the part of the dirstate API dedicated to
201 This is to be used by the part of the dirstate API dedicated to
199 adjusting the dirstate after an update/merge.
202 adjusting the dirstate after an update/merge.
200
203
201 note: calling this might result in no entry existing at all if the
204 note: calling this might result in no entry existing at all if the
202 dirstate map does not see any point in having one for this file
205 dirstate map does not see any point in having one for this file
203 anymore.
206 anymore.
204 """
207 """
205 if merged and (clean_p1 or clean_p2):
208 if merged and (clean_p1 or clean_p2):
206 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
209 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
207 raise error.ProgrammingError(msg)
210 raise error.ProgrammingError(msg)
208 # copy information is now outdated
211 # copy information is now outdated
209 # (maybe new information should be directly passed to this function)
212 # (maybe new information should be directly passed to this function)
210 self.copymap.pop(filename, None)
213 self.copymap.pop(filename, None)
211
214
212 if not (p1_tracked or p2_tracked or wc_tracked):
215 if not (p1_tracked or p2_tracked or wc_tracked):
213 old_entry = self._map.pop(filename, None)
216 old_entry = self._map.pop(filename, None)
214 self._dirs_decr(filename, old_entry=old_entry)
217 self._dirs_decr(filename, old_entry=old_entry)
215 self.nonnormalset.discard(filename)
218 self.nonnormalset.discard(filename)
216 self.copymap.pop(filename, None)
219 self.copymap.pop(filename, None)
217 return
220 return
218 elif merged:
221 elif merged:
219 # XXX might be merged and removed ?
222 # XXX might be merged and removed ?
220 entry = self.get(filename)
223 entry = self.get(filename)
221 if entry is None or not entry.tracked:
224 if entry is None or not entry.tracked:
222 # XXX mostly replicate dirstate.other parent. We should get
225 # XXX mostly replicate dirstate.other parent. We should get
223 # the higher layer to pass us more reliable data where `merged`
226 # the higher layer to pass us more reliable data where `merged`
224 # actually means merged. Dropping this clause will show failure
227 # actually means merged. Dropping this clause will show failure
225 # in `test-graft.t`
228 # in `test-graft.t`
226 merged = False
229 merged = False
227 clean_p2 = True
230 clean_p2 = True
228 elif not (p1_tracked or p2_tracked) and wc_tracked:
231 elif not (p1_tracked or p2_tracked) and wc_tracked:
229 pass # file is added, nothing special to adjust
232 pass # file is added, nothing special to adjust
230 elif (p1_tracked or p2_tracked) and not wc_tracked:
233 elif (p1_tracked or p2_tracked) and not wc_tracked:
231 pass
234 pass
232 elif clean_p2 and wc_tracked:
235 elif clean_p2 and wc_tracked:
233 if p1_tracked or self.get(filename) is not None:
236 if p1_tracked or self.get(filename) is not None:
234 # XXX the `self.get` call is catching a case in
237 # XXX the `self.get` call is catching a case in
235 # `test-merge-remove.t` where the file is tracked in p1, but the
238 # `test-merge-remove.t` where the file is tracked in p1, but the
236 # p1_tracked argument is False.
239 # p1_tracked argument is False.
237 #
240 #
238 # In addition, this seems to be a case where the file is marked
241 # In addition, this seems to be a case where the file is marked
239 # as merged without actually being the result of a merge
242 # as merged without actually being the result of a merge
240 # action. So things are not ideal here.
243 # action. So things are not ideal here.
241 merged = True
244 merged = True
242 clean_p2 = False
245 clean_p2 = False
243 elif not p1_tracked and p2_tracked and wc_tracked:
246 elif not p1_tracked and p2_tracked and wc_tracked:
244 clean_p2 = True
247 clean_p2 = True
245 elif possibly_dirty:
248 elif possibly_dirty:
246 pass
249 pass
247 elif wc_tracked:
250 elif wc_tracked:
248 # this is a "normal" file
251 # this is a "normal" file
249 if parentfiledata is None:
252 if parentfiledata is None:
250 msg = b'failed to pass parentfiledata for a normal file: %s'
253 msg = b'failed to pass parentfiledata for a normal file: %s'
251 msg %= filename
254 msg %= filename
252 raise error.ProgrammingError(msg)
255 raise error.ProgrammingError(msg)
253 else:
256 else:
254 assert False, 'unreachable'
257 assert False, 'unreachable'
255
258
256 old_entry = self._map.get(filename)
259 old_entry = self._map.get(filename)
257 self._dirs_incr(filename, old_entry)
260 self._dirs_incr(filename, old_entry)
258 entry = DirstateItem(
261 entry = DirstateItem(
259 wc_tracked=wc_tracked,
262 wc_tracked=wc_tracked,
260 p1_tracked=p1_tracked,
263 p1_tracked=p1_tracked,
261 p2_tracked=p2_tracked,
264 p2_tracked=p2_tracked,
262 merged=merged,
265 merged=merged,
263 clean_p1=clean_p1,
266 clean_p1=clean_p1,
264 clean_p2=clean_p2,
267 clean_p2=clean_p2,
265 possibly_dirty=possibly_dirty,
268 possibly_dirty=possibly_dirty,
266 parentfiledata=parentfiledata,
269 parentfiledata=parentfiledata,
267 )
270 )
268 if entry.dm_nonnormal:
271 if entry.dm_nonnormal:
269 self.nonnormalset.add(filename)
272 self.nonnormalset.add(filename)
270 else:
273 else:
271 self.nonnormalset.discard(filename)
274 self.nonnormalset.discard(filename)
272 if entry.dm_otherparent:
275 if entry.dm_otherparent:
273 self.otherparentset.add(filename)
276 self.otherparentset.add(filename)
274 else:
277 else:
275 self.otherparentset.discard(filename)
278 self.otherparentset.discard(filename)
276 self._map[filename] = entry
279 self._map[filename] = entry
277
280
278 def set_tracked(self, filename):
281 def set_tracked(self, filename):
279 new = False
282 new = False
280 entry = self.get(filename)
283 entry = self.get(filename)
281 if entry is None:
284 if entry is None:
282 self._dirs_incr(filename)
285 self._dirs_incr(filename)
283 entry = DirstateItem(
286 entry = DirstateItem(
284 p1_tracked=False,
287 p1_tracked=False,
285 p2_tracked=False,
288 p2_tracked=False,
286 wc_tracked=True,
289 wc_tracked=True,
287 merged=False,
290 merged=False,
288 clean_p1=False,
291 clean_p1=False,
289 clean_p2=False,
292 clean_p2=False,
290 possibly_dirty=False,
293 possibly_dirty=False,
291 parentfiledata=None,
294 parentfiledata=None,
292 )
295 )
293 self._map[filename] = entry
296 self._map[filename] = entry
294 if entry.dm_nonnormal:
297 if entry.dm_nonnormal:
295 self.nonnormalset.add(filename)
298 self.nonnormalset.add(filename)
296 new = True
299 new = True
297 elif not entry.tracked:
300 elif not entry.tracked:
298 self._dirs_incr(filename, entry)
301 self._dirs_incr(filename, entry)
299 entry.set_tracked()
302 entry.set_tracked()
300 new = True
303 new = True
301 else:
304 else:
302 # XXX This is probably overkill for most cases, but we need this to
305 # XXX This is probably overkill for most cases, but we need this to
303 # fully replace the `normallookup` call with the `set_tracked` one.
306 # fully replace the `normallookup` call with the `set_tracked` one.
304 # Consider smoothing this in the future.
307 # Consider smoothing this in the future.
305 self.set_possibly_dirty(filename)
308 self.set_possibly_dirty(filename)
306 return new
309 return new
307
310
308 def set_untracked(self, f):
311 def set_untracked(self, f):
309 """Mark a file as no longer tracked in the dirstate map"""
312 """Mark a file as no longer tracked in the dirstate map"""
310 entry = self.get(f)
313 entry = self.get(f)
311 if entry is None:
314 if entry is None:
312 return False
315 return False
313 else:
316 else:
314 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
317 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
315 if not entry.merged:
318 if not entry.merged:
316 self.copymap.pop(f, None)
319 self.copymap.pop(f, None)
317 if entry.added:
320 if entry.added:
318 self.nonnormalset.discard(f)
321 self.nonnormalset.discard(f)
319 self._map.pop(f, None)
322 self._map.pop(f, None)
320 else:
323 else:
321 self.nonnormalset.add(f)
324 self.nonnormalset.add(f)
322 if entry.from_p2:
325 if entry.from_p2:
323 self.otherparentset.add(f)
326 self.otherparentset.add(f)
324 entry.set_untracked()
327 entry.set_untracked()
325 return True
328 return True
326
329
327 def clearambiguoustimes(self, files, now):
330 def clearambiguoustimes(self, files, now):
328 for f in files:
331 for f in files:
329 e = self.get(f)
332 e = self.get(f)
330 if e is not None and e.need_delay(now):
333 if e is not None and e.need_delay(now):
331 e.set_possibly_dirty()
334 e.set_possibly_dirty()
332 self.nonnormalset.add(f)
335 self.nonnormalset.add(f)
333
336
334 def nonnormalentries(self):
337 def nonnormalentries(self):
335 '''Compute the nonnormal dirstate entries from the dmap'''
338 '''Compute the nonnormal dirstate entries from the dmap'''
336 try:
339 try:
337 return parsers.nonnormalotherparententries(self._map)
340 return parsers.nonnormalotherparententries(self._map)
338 except AttributeError:
341 except AttributeError:
339 nonnorm = set()
342 nonnorm = set()
340 otherparent = set()
343 otherparent = set()
341 for fname, e in pycompat.iteritems(self._map):
344 for fname, e in pycompat.iteritems(self._map):
342 if e.dm_nonnormal:
345 if e.dm_nonnormal:
343 nonnorm.add(fname)
346 nonnorm.add(fname)
344 if e.from_p2:
347 if e.from_p2:
345 otherparent.add(fname)
348 otherparent.add(fname)
346 return nonnorm, otherparent
349 return nonnorm, otherparent
347
350
348 @propertycache
351 @propertycache
349 def filefoldmap(self):
352 def filefoldmap(self):
350 """Returns a dictionary mapping normalized case paths to their
353 """Returns a dictionary mapping normalized case paths to their
351 non-normalized versions.
354 non-normalized versions.
352 """
355 """
353 try:
356 try:
354 makefilefoldmap = parsers.make_file_foldmap
357 makefilefoldmap = parsers.make_file_foldmap
355 except AttributeError:
358 except AttributeError:
356 pass
359 pass
357 else:
360 else:
358 return makefilefoldmap(
361 return makefilefoldmap(
359 self._map, util.normcasespec, util.normcasefallback
362 self._map, util.normcasespec, util.normcasefallback
360 )
363 )
361
364
362 f = {}
365 f = {}
363 normcase = util.normcase
366 normcase = util.normcase
364 for name, s in pycompat.iteritems(self._map):
367 for name, s in pycompat.iteritems(self._map):
365 if not s.removed:
368 if not s.removed:
366 f[normcase(name)] = name
369 f[normcase(name)] = name
367 f[b'.'] = b'.' # prevents useless util.fspath() invocation
370 f[b'.'] = b'.' # prevents useless util.fspath() invocation
368 return f
371 return f
369
372
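
A hedged usage sketch of the fold map for case-insensitive lookups; the filenames are illustrative:

    foldmap = dmap.filefoldmap
    stored = foldmap.get(util.normcase(b'README.TXT'))
    # -> e.g. b'ReadMe.txt', the spelling actually recorded in the dirstate
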
370 def hastrackeddir(self, d):
373 def hastrackeddir(self, d):
371 """
374 """
372 Returns True if the dirstate contains a tracked (not removed) file
375 Returns True if the dirstate contains a tracked (not removed) file
373 in this directory.
376 in this directory.
374 """
377 """
375 return d in self._dirs
378 return d in self._dirs
376
379
377 def hasdir(self, d):
380 def hasdir(self, d):
378 """
381 """
379 Returns True if the dirstate contains a file (tracked or removed)
382 Returns True if the dirstate contains a file (tracked or removed)
380 in this directory.
383 in this directory.
381 """
384 """
382 return d in self._alldirs
385 return d in self._alldirs
383
386
384 @propertycache
387 @propertycache
385 def _dirs(self):
388 def _dirs(self):
386 return pathutil.dirs(self._map, only_tracked=True)
389 return pathutil.dirs(self._map, only_tracked=True)
387
390
388 @propertycache
391 @propertycache
389 def _alldirs(self):
392 def _alldirs(self):
390 return pathutil.dirs(self._map)
393 return pathutil.dirs(self._map)
391
394
392 def _opendirstatefile(self):
395 def _opendirstatefile(self):
393 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
396 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
394 if self._pendingmode is not None and self._pendingmode != mode:
397 if self._pendingmode is not None and self._pendingmode != mode:
395 fp.close()
398 fp.close()
396 raise error.Abort(
399 raise error.Abort(
397 _(b'working directory state may be changed in parallel')
400 _(b'working directory state may be changed in parallel')
398 )
401 )
399 self._pendingmode = mode
402 self._pendingmode = mode
400 return fp
403 return fp
401
404
402 def parents(self):
405 def parents(self):
403 if not self._parents:
406 if not self._parents:
404 try:
407 try:
405 fp = self._opendirstatefile()
408 fp = self._opendirstatefile()
406 st = fp.read(2 * self._nodelen)
409 st = fp.read(2 * self._nodelen)
407 fp.close()
410 fp.close()
408 except IOError as err:
411 except IOError as err:
409 if err.errno != errno.ENOENT:
412 if err.errno != errno.ENOENT:
410 raise
413 raise
411 # File doesn't exist, so the current state is empty
414 # File doesn't exist, so the current state is empty
412 st = b''
415 st = b''
413
416
414 l = len(st)
417 l = len(st)
415 if l == self._nodelen * 2:
418 if l == self._nodelen * 2:
416 self._parents = (
419 self._parents = (
417 st[: self._nodelen],
420 st[: self._nodelen],
418 st[self._nodelen : 2 * self._nodelen],
421 st[self._nodelen : 2 * self._nodelen],
419 )
422 )
420 elif l == 0:
423 elif l == 0:
421 self._parents = (
424 self._parents = (
422 self._nodeconstants.nullid,
425 self._nodeconstants.nullid,
423 self._nodeconstants.nullid,
426 self._nodeconstants.nullid,
424 )
427 )
425 else:
428 else:
426 raise error.Abort(
429 raise error.Abort(
427 _(b'working directory state appears damaged!')
430 _(b'working directory state appears damaged!')
428 )
431 )
429
432
430 return self._parents
433 return self._parents
431
434
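
A self-contained sketch of the parent-header parsing above, assuming the v1 layout in which the dirstate file begins with two raw 20-byte nodeids; `NULLID` stands in for `nodeconstants.nullid`:

    NODELEN = 20
    NULLID = b'\x00' * NODELEN

    def parseparents(header):
        if len(header) == 2 * NODELEN:
            return header[:NODELEN], header[NODELEN:]
        elif len(header) == 0:
            # missing or empty dirstate: both parents are the null id
            return NULLID, NULLID
        raise ValueError('working directory state appears damaged!')
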
432 def setparents(self, p1, p2):
435 def setparents(self, p1, p2):
433 self._parents = (p1, p2)
436 self._parents = (p1, p2)
434 self._dirtyparents = True
437 self._dirtyparents = True
435
438
436 def read(self):
439 def read(self):
437 # ignore HG_PENDING because identity is used only for writing
440 # ignore HG_PENDING because identity is used only for writing
438 self.identity = util.filestat.frompath(
441 self.identity = util.filestat.frompath(
439 self._opener.join(self._filename)
442 self._opener.join(self._filename)
440 )
443 )
441
444
442 try:
445 try:
443 fp = self._opendirstatefile()
446 fp = self._opendirstatefile()
444 try:
447 try:
445 st = fp.read()
448 st = fp.read()
446 finally:
449 finally:
447 fp.close()
450 fp.close()
448 except IOError as err:
451 except IOError as err:
449 if err.errno != errno.ENOENT:
452 if err.errno != errno.ENOENT:
450 raise
453 raise
451 return
454 return
452 if not st:
455 if not st:
453 return
456 return
454
457
455 if util.safehasattr(parsers, b'dict_new_presized'):
458 if util.safehasattr(parsers, b'dict_new_presized'):
456 # Make an estimate of the number of files in the dirstate based on
459 # Make an estimate of the number of files in the dirstate based on
457 # its size. This trades wasting some memory for avoiding costly
460 # its size. This trades wasting some memory for avoiding costly
458 # resizes. Each entry has a prefix of 17 bytes followed by one or
461 # resizes. Each entry has a prefix of 17 bytes followed by one or
459 # two path names. Studies on various large-scale real-world repositories
462 # two path names. Studies on various large-scale real-world repositories
460 # found 54 bytes to be a reasonable upper limit for average path names.
463 # found 54 bytes to be a reasonable upper limit for average path names.
461 # Copy entries are ignored for the sake of this estimate.
464 # Copy entries are ignored for the sake of this estimate.
462 self._map = parsers.dict_new_presized(len(st) // 71)
465 self._map = parsers.dict_new_presized(len(st) // 71)
463
466
464 # Python's garbage collector triggers a GC each time a certain number
467 # Python's garbage collector triggers a GC each time a certain number
465 # of container objects (the number being defined by
468 # of container objects (the number being defined by
466 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
469 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
467 # for each file in the dirstate. The C version then immediately marks
470 # for each file in the dirstate. The C version then immediately marks
468 # them as not to be tracked by the collector. However, this has no
471 # them as not to be tracked by the collector. However, this has no
469 # effect on when GCs are triggered, only on what objects the GC looks
472 # effect on when GCs are triggered, only on what objects the GC looks
470 # into. This means that O(number of files) GCs are unavoidable.
473 # into. This means that O(number of files) GCs are unavoidable.
471 # Depending on when in the process's lifetime the dirstate is parsed,
474 # Depending on when in the process's lifetime the dirstate is parsed,
472 # this can get very expensive. As a workaround, disable GC while
475 # this can get very expensive. As a workaround, disable GC while
473 # parsing the dirstate.
476 # parsing the dirstate.
474 #
477 #
475 # (we cannot decorate the function directly since it is in a C module)
478 # (we cannot decorate the function directly since it is in a C module)
476 parse_dirstate = util.nogc(parsers.parse_dirstate)
479 parse_dirstate = util.nogc(parsers.parse_dirstate)
477 p = parse_dirstate(self._map, self.copymap, st)
480 p = parse_dirstate(self._map, self.copymap, st)
478 if not self._dirtyparents:
481 if not self._dirtyparents:
479 self.setparents(*p)
482 self.setparents(*p)
480
483
481 # Avoid excess attribute lookups by fast pathing certain checks
484 # Avoid excess attribute lookups by fast pathing certain checks
482 self.__contains__ = self._map.__contains__
485 self.__contains__ = self._map.__contains__
483 self.__getitem__ = self._map.__getitem__
486 self.__getitem__ = self._map.__getitem__
484 self.get = self._map.get
487 self.get = self._map.get
485
488
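
`util.nogc` wraps `parse_dirstate` so that the cyclic garbage collector stays disabled for the duration of the call, as the comment above explains. A standalone equivalent, sketched under the assumption that restoring the previous GC state is sufficient:

    import functools
    import gc

    def nogc(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            wasenabled = gc.isenabled()
            gc.disable()
            try:
                return func(*args, **kwargs)
            finally:
                if wasenabled:
                    gc.enable()
        return wrapper
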
486 def write(self, _tr, st, now):
489 def write(self, _tr, st, now):
487 st.write(
490 st.write(
488 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
491 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
489 )
492 )
490 st.close()
493 st.close()
491 self._dirtyparents = False
494 self._dirtyparents = False
492 self.nonnormalset, self.otherparentset = self.nonnormalentries()
495 self.nonnormalset, self.otherparentset = self.nonnormalentries()
493
496
494 @propertycache
497 @propertycache
495 def nonnormalset(self):
498 def nonnormalset(self):
496 nonnorm, otherparents = self.nonnormalentries()
499 nonnorm, otherparents = self.nonnormalentries()
497 self.otherparentset = otherparents
500 self.otherparentset = otherparents
498 return nonnorm
501 return nonnorm
499
502
500 @propertycache
503 @propertycache
501 def otherparentset(self):
504 def otherparentset(self):
502 nonnorm, otherparents = self.nonnormalentries()
505 nonnorm, otherparents = self.nonnormalentries()
503 self.nonnormalset = nonnorm
506 self.nonnormalset = nonnorm
504 return otherparents
507 return otherparents
505
508
506 def non_normal_or_other_parent_paths(self):
509 def non_normal_or_other_parent_paths(self):
507 return self.nonnormalset.union(self.otherparentset)
510 return self.nonnormalset.union(self.otherparentset)
508
511
509 @propertycache
512 @propertycache
510 def identity(self):
513 def identity(self):
511 self._map
514 self._map
512 return self.identity
515 return self.identity
513
516
514 @propertycache
517 @propertycache
515 def dirfoldmap(self):
518 def dirfoldmap(self):
516 f = {}
519 f = {}
517 normcase = util.normcase
520 normcase = util.normcase
518 for name in self._dirs:
521 for name in self._dirs:
519 f[normcase(name)] = name
522 f[normcase(name)] = name
520 return f
523 return f
521
524
522
525
523 if rustmod is not None:
526 if rustmod is not None:
524
527
525 class dirstatemap(object):
528 class dirstatemap(object):
526 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
529 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
527 self._use_dirstate_v2 = use_dirstate_v2
530 self._use_dirstate_v2 = use_dirstate_v2
528 self._nodeconstants = nodeconstants
531 self._nodeconstants = nodeconstants
529 self._ui = ui
532 self._ui = ui
530 self._opener = opener
533 self._opener = opener
531 self._root = root
534 self._root = root
532 self._filename = b'dirstate'
535 self._filename = b'dirstate'
533 self._nodelen = 20 # Also update Rust code when changing this!
536 self._nodelen = 20 # Also update Rust code when changing this!
534 self._parents = None
537 self._parents = None
535 self._dirtyparents = False
538 self._dirtyparents = False
536 self._docket = None
539 self._docket = None
537
540
538 # for consistent view between _pl() and _read() invocations
541 # for consistent view between _pl() and _read() invocations
539 self._pendingmode = None
542 self._pendingmode = None
540
543
541 self._use_dirstate_tree = self._ui.configbool(
544 self._use_dirstate_tree = self._ui.configbool(
542 b"experimental",
545 b"experimental",
543 b"dirstate-tree.in-memory",
546 b"dirstate-tree.in-memory",
544 False,
547 False,
545 )
548 )
546
549
547 def addfile(
550 def addfile(
548 self,
551 self,
549 f,
552 f,
550 mode=0,
553 mode=0,
551 size=None,
554 size=None,
552 mtime=None,
555 mtime=None,
553 added=False,
556 added=False,
554 merged=False,
557 merged=False,
555 from_p2=False,
558 from_p2=False,
556 possibly_dirty=False,
559 possibly_dirty=False,
557 ):
560 ):
558 ret = self._rustmap.addfile(
561 ret = self._rustmap.addfile(
559 f,
562 f,
560 mode,
563 mode,
561 size,
564 size,
562 mtime,
565 mtime,
563 added,
566 added,
564 merged,
567 merged,
565 from_p2,
568 from_p2,
566 possibly_dirty,
569 possibly_dirty,
567 )
570 )
568 if added:
571 if added:
569 self.copymap.pop(f, None)
572 self.copymap.pop(f, None)
570 return ret
573 return ret
571
574
572 def reset_state(
575 def reset_state(
573 self,
576 self,
574 filename,
577 filename,
575 wc_tracked=False,
578 wc_tracked=False,
576 p1_tracked=False,
579 p1_tracked=False,
577 p2_tracked=False,
580 p2_tracked=False,
578 merged=False,
581 merged=False,
579 clean_p1=False,
582 clean_p1=False,
580 clean_p2=False,
583 clean_p2=False,
581 possibly_dirty=False,
584 possibly_dirty=False,
582 parentfiledata=None,
585 parentfiledata=None,
583 ):
586 ):
584 """Set a entry to a given state, disregarding all previous state
587 """Set a entry to a given state, disregarding all previous state
585
588
586 This is to be used by the part of the dirstate API dedicated to
589 This is to be used by the part of the dirstate API dedicated to
587 adjusting the dirstate after an update/merge.
590 adjusting the dirstate after an update/merge.
588
591
589 note: calling this might result in no entry existing at all if the
592 note: calling this might result in no entry existing at all if the
590 dirstate map does not see any point in having one for this file
593 dirstate map does not see any point in having one for this file
591 anymore.
594 anymore.
592 """
595 """
593 if merged and (clean_p1 or clean_p2):
596 if merged and (clean_p1 or clean_p2):
594 msg = (
597 msg = (
595 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
598 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
596 )
599 )
597 raise error.ProgrammingError(msg)
600 raise error.ProgrammingError(msg)
598 # copy information is now outdated
601 # copy information is now outdated
599 # (maybe new information should be directly passed to this function)
602 # (maybe new information should be directly passed to this function)
600 self.copymap.pop(filename, None)
603 self.copymap.pop(filename, None)
601
604
602 if not (p1_tracked or p2_tracked or wc_tracked):
605 if not (p1_tracked or p2_tracked or wc_tracked):
603 self.dropfile(filename)
606 self.dropfile(filename)
604 elif merged:
607 elif merged:
605 # XXX might be merged and removed ?
608 # XXX might be merged and removed ?
606 entry = self.get(filename)
609 entry = self.get(filename)
607 if entry is not None and entry.tracked:
610 if entry is not None and entry.tracked:
608 # XXX mostly replicate dirstate.other parent. We should get
611 # XXX mostly replicate dirstate.other parent. We should get
609 # the higher layer to pass us more reliable data where `merged`
612 # the higher layer to pass us more reliable data where `merged`
610 # actually means merged. Dropping the else clause will show
613 # actually means merged. Dropping the else clause will show
611 # failure in `test-graft.t`
614 # failure in `test-graft.t`
612 self.addfile(filename, merged=True)
615 self.addfile(filename, merged=True)
613 else:
616 else:
614 self.addfile(filename, from_p2=True)
617 self.addfile(filename, from_p2=True)
615 elif not (p1_tracked or p2_tracked) and wc_tracked:
618 elif not (p1_tracked or p2_tracked) and wc_tracked:
616 self.addfile(
619 self.addfile(
617 filename, added=True, possibly_dirty=possibly_dirty
620 filename, added=True, possibly_dirty=possibly_dirty
618 )
621 )
619 elif (p1_tracked or p2_tracked) and not wc_tracked:
622 elif (p1_tracked or p2_tracked) and not wc_tracked:
620 # XXX might be merged and removed ?
623 # XXX might be merged and removed ?
621 self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0)
624 self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0)
622 self.nonnormalset.add(filename)
625 self.nonnormalset.add(filename)
623 elif clean_p2 and wc_tracked:
626 elif clean_p2 and wc_tracked:
624 if p1_tracked or self.get(filename) is not None:
627 if p1_tracked or self.get(filename) is not None:
625 # XXX the `self.get` call is catching a case in
628 # XXX the `self.get` call is catching a case in
626 # `test-merge-remove.t` where the file is tracked in p1, but the
629 # `test-merge-remove.t` where the file is tracked in p1, but the
627 # p1_tracked argument is False.
630 # p1_tracked argument is False.
628 #
631 #
629 # In addition, this seems to be a case where the file is marked
632 # In addition, this seems to be a case where the file is marked
630 # as merged without actually being the result of a merge
633 # as merged without actually being the result of a merge
631 # action. So things are not ideal here.
634 # action. So things are not ideal here.
632 self.addfile(filename, merged=True)
635 self.addfile(filename, merged=True)
633 else:
636 else:
634 self.addfile(filename, from_p2=True)
637 self.addfile(filename, from_p2=True)
635 elif not p1_tracked and p2_tracked and wc_tracked:
638 elif not p1_tracked and p2_tracked and wc_tracked:
636 self.addfile(
639 self.addfile(
637 filename, from_p2=True, possibly_dirty=possibly_dirty
640 filename, from_p2=True, possibly_dirty=possibly_dirty
638 )
641 )
639 elif possibly_dirty:
642 elif possibly_dirty:
640 self.addfile(filename, possibly_dirty=possibly_dirty)
643 self.addfile(filename, possibly_dirty=possibly_dirty)
641 elif wc_tracked:
644 elif wc_tracked:
642 # this is a "normal" file
645 # this is a "normal" file
643 if parentfiledata is None:
646 if parentfiledata is None:
644 msg = b'failed to pass parentfiledata for a normal file: %s'
647 msg = b'failed to pass parentfiledata for a normal file: %s'
645 msg %= filename
648 msg %= filename
646 raise error.ProgrammingError(msg)
649 raise error.ProgrammingError(msg)
647 mode, size, mtime = parentfiledata
650 mode, size, mtime = parentfiledata
648 self.addfile(filename, mode=mode, size=size, mtime=mtime)
651 self.addfile(filename, mode=mode, size=size, mtime=mtime)
649 self.nonnormalset.discard(filename)
652 self.nonnormalset.discard(filename)
650 else:
653 else:
651 assert False, 'unreachable'
654 assert False, 'unreachable'
652
655
653 def set_tracked(self, filename):
656 def set_tracked(self, filename):
654 new = False
657 new = False
655 entry = self.get(filename)
658 entry = self.get(filename)
656 if entry is None:
659 if entry is None:
657 self.addfile(filename, added=True)
660 self.addfile(filename, added=True)
658 new = True
661 new = True
659 elif not entry.tracked:
662 elif not entry.tracked:
660 entry.set_tracked()
663 entry.set_tracked()
661 self._rustmap.set_v1(filename, entry)
664 self._rustmap.set_v1(filename, entry)
662 new = True
665 new = True
663 else:
666 else:
664 # XXX This is probably overkill for most cases, but we need this to
667 # XXX This is probably overkill for most cases, but we need this to
665 # fully replace the `normallookup` call with the `set_tracked` one.
668 # fully replace the `normallookup` call with the `set_tracked` one.
666 # Consider smoothing this in the future.
669 # Consider smoothing this in the future.
667 self.set_possibly_dirty(filename)
670 self.set_possibly_dirty(filename)
668 return new
671 return new
669
672
670 def set_untracked(self, f):
673 def set_untracked(self, f):
671 """Mark a file as no longer tracked in the dirstate map"""
674 """Mark a file as no longer tracked in the dirstate map"""
672 # `in_merge` only triggers more logic, so it is "fine" to pass it.
675 # `in_merge` only triggers more logic, so it is "fine" to pass it.
673 #
676 #
674 # the inner rust dirstate map code needs to be adjusted once the API
677 # the inner rust dirstate map code needs to be adjusted once the API
675 # for dirstate/dirstatemap/DirstateItem is a bit more settled
678 # for dirstate/dirstatemap/DirstateItem is a bit more settled
676 entry = self.get(f)
679 entry = self.get(f)
677 if entry is None:
680 if entry is None:
678 return False
681 return False
679 else:
682 else:
680 if entry.added:
683 if entry.added:
681 self._rustmap.copymap().pop(f, None)
684 self._rustmap.copymap().pop(f, None)
682 self._rustmap.dropfile(f)
685 self._rustmap.dropfile(f)
683 else:
686 else:
684 self._rustmap.removefile(f, in_merge=True)
687 self._rustmap.removefile(f, in_merge=True)
685 return True
688 return True
686
689
687 def removefile(self, *args, **kwargs):
690 def removefile(self, *args, **kwargs):
688 return self._rustmap.removefile(*args, **kwargs)
691 return self._rustmap.removefile(*args, **kwargs)
689
692
690 def dropfile(self, f, *args, **kwargs):
693 def dropfile(self, f, *args, **kwargs):
691 self._rustmap.copymap().pop(f, None)
694 self._rustmap.copymap().pop(f, None)
692 return self._rustmap.dropfile(f, *args, **kwargs)
695 return self._rustmap.dropfile(f, *args, **kwargs)
693
696
694 def clearambiguoustimes(self, *args, **kwargs):
697 def clearambiguoustimes(self, *args, **kwargs):
695 return self._rustmap.clearambiguoustimes(*args, **kwargs)
698 return self._rustmap.clearambiguoustimes(*args, **kwargs)
696
699
697 def nonnormalentries(self):
700 def nonnormalentries(self):
698 return self._rustmap.nonnormalentries()
701 return self._rustmap.nonnormalentries()
699
702
700 def get(self, *args, **kwargs):
703 def get(self, *args, **kwargs):
701 return self._rustmap.get(*args, **kwargs)
704 return self._rustmap.get(*args, **kwargs)
702
705
703 @property
706 @property
704 def copymap(self):
707 def copymap(self):
705 return self._rustmap.copymap()
708 return self._rustmap.copymap()
706
709
707 def debug_iter(self, all):
710 def debug_iter(self, all):
711 """
712 Return an iterator of (filename, state, mode, size, mtime) tuples
713
714 `all`: also include, with `state == b' '`, dirstate tree nodes that
715 don't have an associated `DirstateItem`.
716
717 """
708 return self._rustmap.debug_iter(all)
718 return self._rustmap.debug_iter(all)
709
719
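
Unlike the pure-Python map, the Rust tree keeps directory nodes that carry no `DirstateItem` of their own; with `all=True` they are reported with `state == b' '`. A hedged usage sketch (`dmap` and `process` are illustrative):

    for filename, state, mode, size, mtime in dmap.debug_iter(all=True):
        if state == b' ':
            continue  # structural tree node without an associated item
        process(filename, state)
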
710 def preload(self):
720 def preload(self):
711 self._rustmap
721 self._rustmap
712
722
713 def clear(self):
723 def clear(self):
714 self._rustmap.clear()
724 self._rustmap.clear()
715 self.setparents(
725 self.setparents(
716 self._nodeconstants.nullid, self._nodeconstants.nullid
726 self._nodeconstants.nullid, self._nodeconstants.nullid
717 )
727 )
718 util.clearcachedproperty(self, b"_dirs")
728 util.clearcachedproperty(self, b"_dirs")
719 util.clearcachedproperty(self, b"_alldirs")
729 util.clearcachedproperty(self, b"_alldirs")
720 util.clearcachedproperty(self, b"dirfoldmap")
730 util.clearcachedproperty(self, b"dirfoldmap")
721
731
722 def items(self):
732 def items(self):
723 return self._rustmap.items()
733 return self._rustmap.items()
724
734
725 def keys(self):
735 def keys(self):
726 return iter(self._rustmap)
736 return iter(self._rustmap)
727
737
728 def __contains__(self, key):
738 def __contains__(self, key):
729 return key in self._rustmap
739 return key in self._rustmap
730
740
731 def __getitem__(self, item):
741 def __getitem__(self, item):
732 return self._rustmap[item]
742 return self._rustmap[item]
733
743
734 def __len__(self):
744 def __len__(self):
735 return len(self._rustmap)
745 return len(self._rustmap)
736
746
737 def __iter__(self):
747 def __iter__(self):
738 return iter(self._rustmap)
748 return iter(self._rustmap)
739
749
740 # forward for Python 2/3 compat
750 # forward for Python 2/3 compat
741 iteritems = items
751 iteritems = items
742
752
743 def _opendirstatefile(self):
753 def _opendirstatefile(self):
744 fp, mode = txnutil.trypending(
754 fp, mode = txnutil.trypending(
745 self._root, self._opener, self._filename
755 self._root, self._opener, self._filename
746 )
756 )
747 if self._pendingmode is not None and self._pendingmode != mode:
757 if self._pendingmode is not None and self._pendingmode != mode:
748 fp.close()
758 fp.close()
749 raise error.Abort(
759 raise error.Abort(
750 _(b'working directory state may be changed in parallel')
760 _(b'working directory state may be changed in parallel')
751 )
761 )
752 self._pendingmode = mode
762 self._pendingmode = mode
753 return fp
763 return fp
754
764
755 def _readdirstatefile(self, size=-1):
765 def _readdirstatefile(self, size=-1):
756 try:
766 try:
757 with self._opendirstatefile() as fp:
767 with self._opendirstatefile() as fp:
758 return fp.read(size)
768 return fp.read(size)
759 except IOError as err:
769 except IOError as err:
760 if err.errno != errno.ENOENT:
770 if err.errno != errno.ENOENT:
761 raise
771 raise
762 # File doesn't exist, so the current state is empty
772 # File doesn't exist, so the current state is empty
763 return b''
773 return b''
764
774
765 def setparents(self, p1, p2):
775 def setparents(self, p1, p2):
766 self._parents = (p1, p2)
776 self._parents = (p1, p2)
767 self._dirtyparents = True
777 self._dirtyparents = True
768
778
769 def parents(self):
779 def parents(self):
770 if not self._parents:
780 if not self._parents:
771 if self._use_dirstate_v2:
781 if self._use_dirstate_v2:
772 self._parents = self.docket.parents
782 self._parents = self.docket.parents
773 else:
783 else:
774 read_len = self._nodelen * 2
784 read_len = self._nodelen * 2
775 st = self._readdirstatefile(read_len)
785 st = self._readdirstatefile(read_len)
776 l = len(st)
786 l = len(st)
777 if l == read_len:
787 if l == read_len:
778 self._parents = (
788 self._parents = (
779 st[: self._nodelen],
789 st[: self._nodelen],
780 st[self._nodelen : 2 * self._nodelen],
790 st[self._nodelen : 2 * self._nodelen],
781 )
791 )
782 elif l == 0:
792 elif l == 0:
783 self._parents = (
793 self._parents = (
784 self._nodeconstants.nullid,
794 self._nodeconstants.nullid,
785 self._nodeconstants.nullid,
795 self._nodeconstants.nullid,
786 )
796 )
787 else:
797 else:
788 raise error.Abort(
798 raise error.Abort(
789 _(b'working directory state appears damaged!')
799 _(b'working directory state appears damaged!')
790 )
800 )
791
801
792 return self._parents
802 return self._parents
793
803
794 @property
804 @property
795 def docket(self):
805 def docket(self):
796 if not self._docket:
806 if not self._docket:
797 if not self._use_dirstate_v2:
        if not self._use_dirstate_v2:
            raise error.ProgrammingError(
                b'dirstate only has a docket in v2 format'
            )
        self._docket = docketmod.DirstateDocket.parse(
            self._readdirstatefile(), self._nodeconstants
        )
        return self._docket

    @propertycache
    def _rustmap(self):
        """
        Fills the Dirstatemap when called.
        """
        # ignore HG_PENDING because identity is used only for writing
        self.identity = util.filestat.frompath(
            self._opener.join(self._filename)
        )

        if self._use_dirstate_v2:
            if self.docket.uuid:
                # TODO: use mmap when possible
                data = self._opener.read(self.docket.data_filename())
            else:
                data = b''
            self._rustmap = rustmod.DirstateMap.new_v2(
                data, self.docket.data_size, self.docket.tree_metadata
            )
            parents = self.docket.parents
        else:
            self._rustmap, parents = rustmod.DirstateMap.new_v1(
                self._use_dirstate_tree, self._readdirstatefile()
            )

        if parents and not self._dirtyparents:
            self.setparents(*parents)

        self.__contains__ = self._rustmap.__contains__
        self.__getitem__ = self._rustmap.__getitem__
        self.get = self._rustmap.get
        return self._rustmap

    def write(self, tr, st, now):
        if not self._use_dirstate_v2:
            p1, p2 = self.parents()
            packed = self._rustmap.write_v1(p1, p2, now)
            st.write(packed)
            st.close()
            self._dirtyparents = False
            return

        # We can only append to an existing data file if there is one
        can_append = self.docket.uuid is not None
        packed, meta, append = self._rustmap.write_v2(now, can_append)
        if append:
            docket = self.docket
            data_filename = docket.data_filename()
            if tr:
                tr.add(data_filename, docket.data_size)
            with self._opener(data_filename, b'r+b') as fp:
                fp.seek(docket.data_size)
                assert fp.tell() == docket.data_size
                written = fp.write(packed)
                if written is not None:  # py2 may return None
                    assert written == len(packed), (written, len(packed))
            docket.data_size += len(packed)
            docket.parents = self.parents()
            docket.tree_metadata = meta
            st.write(docket.serialize())
            st.close()
        else:
            old_docket = self.docket
            new_docket = docketmod.DirstateDocket.with_new_uuid(
                self.parents(), len(packed), meta
            )
            data_filename = new_docket.data_filename()
            if tr:
                tr.add(data_filename, 0)
            self._opener.write(data_filename, packed)
            # Write the new docket after the new data file has been
            # written. Because `st` was opened with `atomictemp=True`,
            # the actual `.hg/dirstate` file is only affected on close.
            st.write(new_docket.serialize())
            st.close()
            # Remove the old data file after the new docket pointing to
            # the new data file was written.
            if old_docket.uuid:
                data_filename = old_docket.data_filename()
                unlink = lambda _tr=None: self._opener.unlink(data_filename)
                if tr:
                    category = b"dirstate-v2-clean-" + old_docket.uuid
                    tr.addpostclose(category, unlink)
                else:
                    unlink()
            self._docket = new_docket
        # Reload from the newly-written file
        util.clearcachedproperty(self, b"_rustmap")
        self._dirtyparents = False

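The rewrite branch above depends entirely on ordering: the new data file must exist before the docket that points at it is committed, and the old data file is only unlinked once the new docket is in place (or at transaction close). A minimal standalone sketch of the same sequencing, using plain files and hypothetical names rather than Mercurial's opener/vfs API:

```python
# Sketch only: `pointer` stands in for the docket and `data-*` for the
# dirstate-v2 data files; none of these names come from Mercurial itself.
import os
import tempfile
import uuid

def rewrite_pointer_file(directory, payload):
    # 1. Write the new data file under a fresh name (like a new docket uuid).
    data_name = 'data-%s' % uuid.uuid4().hex
    with open(os.path.join(directory, data_name), 'wb') as f:
        f.write(payload)
    # 2. Stage the new pointer contents in a temporary file.
    fd, tmp = tempfile.mkstemp(dir=directory)
    with os.fdopen(fd, 'w') as f:
        f.write(data_name)
    pointer = os.path.join(directory, 'pointer')
    try:
        with open(pointer) as f:
            old_data = f.read()
    except FileNotFoundError:
        old_data = None
    # 3. Atomically switch the pointer, mirroring `atomictemp=True`.
    os.replace(tmp, pointer)
    # 4. Only now is the old data file unreferenced and safe to delete.
    if old_data:
        os.unlink(os.path.join(directory, old_data))
```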
    @propertycache
    def filefoldmap(self):
        """Returns a dictionary mapping normalized case paths to their
        non-normalized versions.
        """
        return self._rustmap.filefoldmapasdict()

    def hastrackeddir(self, d):
        return self._rustmap.hastrackeddir(d)

    def hasdir(self, d):
        return self._rustmap.hasdir(d)

    @propertycache
    def identity(self):
        self._rustmap
        return self.identity

    @property
    def nonnormalset(self):
        nonnorm = self._rustmap.non_normal_entries()
        return nonnorm

    @propertycache
    def otherparentset(self):
        otherparents = self._rustmap.other_parent_entries()
        return otherparents

    def non_normal_or_other_parent_paths(self):
        return self._rustmap.non_normal_or_other_parent_paths()

    @propertycache
    def dirfoldmap(self):
        f = {}
        normcase = util.normcase
        for name in self._rustmap.tracked_dirs():
            f[normcase(name)] = name
        return f

    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        entry = self[filename]
        entry.set_possibly_dirty()
        self._rustmap.set_v1(filename, entry)

    def set_clean(self, filename, mode, size, mtime):
        """mark a file as back to a clean state"""
        entry = self[filename]
        mtime = mtime & rangemask
        size = size & rangemask
        entry.set_clean(mode, size, mtime)
        self._rustmap.set_v1(filename, entry)
        self._rustmap.copymap().pop(filename, None)

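`set_clean` masks `size` and `mtime` with `rangemask` before storing them. `rangemask` is defined elsewhere in this module as `0x7FFFFFFF`, so both values are folded into the positive range of the signed 32-bit fields that the dirstate-v1 format can store; oversized values alias rather than overflow. A quick illustration:

```python
# rangemask as defined in this module; values are truncated to 31 bits so
# they fit the signed 32-bit on-disk fields of dirstate-v1.
rangemask = 0x7FFFFFFF

def clamp(value):
    return value & rangemask

assert clamp(1234) == 1234        # small values pass through unchanged
assert clamp(2**31 + 42) == 42    # a >2 GiB size aliases instead of overflowing
```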
    def __setitem__(self, key, value):
        assert isinstance(value, DirstateItem)
        self._rustmap.set_v1(key, value)
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs
@@ -1,676 +1,675 @@
// dirstate_map.rs
//
// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
//
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.

//! Bindings for the `hg::dirstate::dirstate_map` file provided by the
//! `hg-core` package.

use std::cell::{RefCell, RefMut};
use std::convert::TryInto;

use cpython::{
    exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
    PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
    UnsafePyLeaked,
};

use crate::{
    dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
    dirstate::make_dirstate_item,
-    dirstate::make_dirstate_item_raw,
    dirstate::non_normal_entries::{
        NonNormalEntries, NonNormalEntriesIterator,
    },
    pybytes_deref::PyBytesDeref,
};
use hg::{
    dirstate::parsers::Timestamp,
    dirstate::MTIME_UNSET,
    dirstate::SIZE_NON_NORMAL,
    dirstate_tree::dirstate_map::DirstateMap as TreeDirstateMap,
    dirstate_tree::dispatch::DirstateMapMethods,
    dirstate_tree::on_disk::DirstateV2ParseError,
    dirstate_tree::owning::OwningDirstateMap,
    revlog::Node,
    utils::files::normalize_case,
    utils::hg_path::{HgPath, HgPathBuf},
    DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
    DirstateParents, EntryState, StateMapIter,
};

// TODO
// This object needs to share references to multiple members of its Rust
// inner struct, namely `copy_map`, `dirs` and `all_dirs`.
// Right now `CopyMap` is done, but it needs to have an explicit reference
// to `RustDirstateMap` which itself needs to have an encapsulation for
// every method in `CopyMap` (copymapcopy, etc.).
// This is ugly and hard to maintain.
// The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
// `py_class!` is already implemented and does not mention
// `RustDirstateMap`, rightfully so.
// All attributes also have to have a separate refcount data attribute for
// leaks, with all methods that go along for reference sharing.
py_class!(pub class DirstateMap |py| {
    @shared data inner: Box<dyn DirstateMapMethods + Send>;

    /// Returns a `(dirstate_map, parents)` tuple
    @staticmethod
    def new_v1(
        use_dirstate_tree: bool,
        on_disk: PyBytes,
    ) -> PyResult<PyObject> {
        let (inner, parents) = if use_dirstate_tree {
            let on_disk = PyBytesDeref::new(py, on_disk);
            let mut map = OwningDirstateMap::new_empty(on_disk);
            let (on_disk, map_placeholder) = map.get_mut_pair();

            let (actual_map, parents) = TreeDirstateMap::new_v1(on_disk)
                .map_err(|e| dirstate_error(py, e))?;
            *map_placeholder = actual_map;
            (Box::new(map) as _, parents)
        } else {
            let bytes = on_disk.data(py);
            let mut map = RustDirstateMap::default();
            let parents = map.read(bytes).map_err(|e| dirstate_error(py, e))?;
            (Box::new(map) as _, parents)
        };
        let map = Self::create_instance(py, inner)?;
        let parents = parents.map(|p| {
            let p1 = PyBytes::new(py, p.p1.as_bytes());
            let p2 = PyBytes::new(py, p.p2.as_bytes());
            (p1, p2)
        });
        Ok((map, parents).to_py_object(py).into_object())
    }

    /// Returns a DirstateMap
    @staticmethod
    def new_v2(
        on_disk: PyBytes,
        data_size: usize,
        tree_metadata: PyBytes,
    ) -> PyResult<PyObject> {
        let dirstate_error = |e: DirstateError| {
            PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
        };
        let on_disk = PyBytesDeref::new(py, on_disk);
        let mut map = OwningDirstateMap::new_empty(on_disk);
        let (on_disk, map_placeholder) = map.get_mut_pair();
        *map_placeholder = TreeDirstateMap::new_v2(
            on_disk, data_size, tree_metadata.data(py),
        ).map_err(dirstate_error)?;
        let map = Self::create_instance(py, Box::new(map))?;
        Ok(map.into_object())
    }

    def clear(&self) -> PyResult<PyObject> {
        self.inner(py).borrow_mut().clear();
        Ok(py.None())
    }

    def get(
        &self,
        key: PyObject,
        default: Option<PyObject> = None
    ) -> PyResult<Option<PyObject>> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow()
            .get(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(entry) => {
                Ok(Some(make_dirstate_item(py, &entry)?))
            },
            None => Ok(default)
        }
    }

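Together with `__len__`, `__contains__`, and `__getitem__` defined further down, this `get` method gives the binding a dict-like surface keyed by repository-relative byte paths. A usage sketch from the Python side, assuming `dmap` was built with `new_v1()` or `new_v2()` as above (the path is illustrative):

```python
# `dmap` and the path are assumptions for illustration; keys must be bytes.
item = dmap.get(b'src/main.rs')      # None (or the given default) if untracked
if b'src/main.rs' in dmap:           # __contains__
    item = dmap[b'src/main.rs']      # __getitem__, raises KeyError when absent
print(len(dmap))                     # number of entries in the map
```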
    def set_v1(&self, path: PyObject, item: PyObject) -> PyResult<PyObject> {
        let f = path.extract::<PyBytes>(py)?;
        let filename = HgPath::new(f.data(py));
        let state = item.getattr(py, "state")?.extract::<PyBytes>(py)?;
        let state = state.data(py)[0];
        let entry = DirstateEntry::from_v1_data(
            state.try_into().expect("state is always valid"),
            item.getattr(py, "mode")?.extract(py)?,
            item.getattr(py, "size")?.extract(py)?,
            item.getattr(py, "mtime")?.extract(py)?,
        );
        self.inner(py).borrow_mut().set_v1(filename, entry);
        Ok(py.None())
    }

    def addfile(
        &self,
        f: PyObject,
        mode: PyObject,
        size: PyObject,
        mtime: PyObject,
        added: PyObject,
        merged: PyObject,
        from_p2: PyObject,
        possibly_dirty: PyObject,
    ) -> PyResult<PyObject> {
        let f = f.extract::<PyBytes>(py)?;
        let filename = HgPath::new(f.data(py));
        let mode = if mode.is_none(py) {
            // fallback default value
            0
        } else {
            mode.extract(py)?
        };
        let size = if size.is_none(py) {
            // fallback default value
            SIZE_NON_NORMAL
        } else {
            size.extract(py)?
        };
        let mtime = if mtime.is_none(py) {
            // fallback default value
            MTIME_UNSET
        } else {
            mtime.extract(py)?
        };
        let entry = DirstateEntry::new_for_add_file(mode, size, mtime);
        let added = added.extract::<PyBool>(py)?.is_true();
        let merged = merged.extract::<PyBool>(py)?.is_true();
        let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
        let possibly_dirty = possibly_dirty.extract::<PyBool>(py)?.is_true();
        self.inner(py).borrow_mut().add_file(
            filename,
            entry,
            added,
            merged,
            from_p2,
            possibly_dirty
        ).and(Ok(py.None())).or_else(|e: DirstateError| {
            Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
        })
    }

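Note how `addfile` treats `None` for `mode`, `size`, and `mtime`: each falls back to a sentinel (`0`, `SIZE_NON_NORMAL`, `MTIME_UNSET`), so a caller can mark a file tracked before any stat data is known. A hedged sketch of that caller-side contract, with `dmap` assumed to be a `DirstateMap` instance:

```python
# All-positional call; the None arguments trigger the Rust-side fallbacks.
dmap.addfile(
    b'newfile.txt',  # f
    None,            # mode  -> 0
    None,            # size  -> SIZE_NON_NORMAL
    None,            # mtime -> MTIME_UNSET
    True,            # added
    False,           # merged
    False,           # from_p2
    True,            # possibly_dirty: force a later content comparison
)
```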
    def removefile(
        &self,
        f: PyObject,
        in_merge: PyObject
    ) -> PyResult<PyObject> {
        self.inner(py).borrow_mut()
            .remove_file(
                HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
                in_merge.extract::<PyBool>(py)?.is_true(),
            )
            .or_else(|_| {
                Err(PyErr::new::<exc::OSError, _>(
                    py,
                    "Dirstate error".to_string(),
                ))
            })?;
        Ok(py.None())
    }

    def dropfile(
        &self,
        f: PyObject,
    ) -> PyResult<PyBool> {
        self.inner(py).borrow_mut()
            .drop_file(
                HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
            )
            .and_then(|b| Ok(b.to_py_object(py)))
            .or_else(|e| {
                Err(PyErr::new::<exc::OSError, _>(
                    py,
                    format!("Dirstate error: {}", e.to_string()),
                ))
            })
    }

    def clearambiguoustimes(
        &self,
        files: PyObject,
        now: PyObject
    ) -> PyResult<PyObject> {
        let files: PyResult<Vec<HgPathBuf>> = files
            .iter(py)?
            .map(|filename| {
                Ok(HgPathBuf::from_bytes(
                    filename?.extract::<PyBytes>(py)?.data(py),
                ))
            })
            .collect();
        self.inner(py)
            .borrow_mut()
            .clear_ambiguous_times(files?, now.extract(py)?)
            .map_err(|e| v2_error(py, e))?;
        Ok(py.None())
    }

    def other_parent_entries(&self) -> PyResult<PyObject> {
        let mut inner_shared = self.inner(py).borrow_mut();
        let set = PySet::empty(py)?;
        for path in inner_shared.iter_other_parent_paths() {
            let path = path.map_err(|e| v2_error(py, e))?;
            set.add(py, PyBytes::new(py, path.as_bytes()))?;
        }
        Ok(set.into_object())
    }

    def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
        NonNormalEntries::from_inner(py, self.clone_ref(py))
    }

    def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
        let key = key.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow_mut()
            .non_normal_entries_contains(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))
    }

    def non_normal_entries_display(&self) -> PyResult<PyString> {
        let mut inner = self.inner(py).borrow_mut();
        let paths = inner
            .iter_non_normal_paths()
            .collect::<Result<Vec<_>, _>>()
            .map_err(|e| v2_error(py, e))?;
        let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
        Ok(PyString::new(py, &formatted))
    }

    def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
        let key = key.extract::<PyBytes>(py)?;
        let key = key.data(py);
        let was_present = self
            .inner(py)
            .borrow_mut()
            .non_normal_entries_remove(HgPath::new(key));
        if !was_present {
            let msg = String::from_utf8_lossy(key);
            Err(PyErr::new::<exc::KeyError, _>(py, msg))
        } else {
            Ok(py.None())
        }
    }

    def non_normal_entries_discard(&self, key: PyObject) -> PyResult<PyObject>
    {
        let key = key.extract::<PyBytes>(py)?;
        self
            .inner(py)
            .borrow_mut()
            .non_normal_entries_remove(HgPath::new(key.data(py)));
        Ok(py.None())
    }

    def non_normal_entries_add(&self, key: PyObject) -> PyResult<PyObject> {
        let key = key.extract::<PyBytes>(py)?;
        self
            .inner(py)
            .borrow_mut()
            .non_normal_entries_add(HgPath::new(key.data(py)));
        Ok(py.None())
    }

    def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
        let mut inner = self.inner(py).borrow_mut();

        let ret = PyList::new(py, &[]);
        for filename in inner.non_normal_or_other_parent_paths() {
            let filename = filename.map_err(|e| v2_error(py, e))?;
            let as_pystring = PyBytes::new(py, filename.as_bytes());
            ret.append(py, as_pystring.into_object());
        }
        Ok(ret)
    }

    def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
        // Make sure the sets are defined before we no longer have a mutable
        // reference to the dmap.
        self.inner(py)
            .borrow_mut()
            .set_non_normal_other_parent_entries(false);

        let leaked_ref = self.inner(py).leak_immutable();

        NonNormalEntriesIterator::from_inner(py, unsafe {
            leaked_ref.map(py, |o| {
                o.iter_non_normal_paths_panic()
            })
        })
    }

    def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
        let d = d.extract::<PyBytes>(py)?;
        Ok(self.inner(py).borrow_mut()
            .has_tracked_dir(HgPath::new(d.data(py)))
            .map_err(|e| {
                PyErr::new::<exc::ValueError, _>(py, e.to_string())
            })?
            .to_py_object(py))
    }

    def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
        let d = d.extract::<PyBytes>(py)?;
        Ok(self.inner(py).borrow_mut()
            .has_dir(HgPath::new(d.data(py)))
            .map_err(|e| {
                PyErr::new::<exc::ValueError, _>(py, e.to_string())
            })?
            .to_py_object(py))
    }

    def write_v1(
        &self,
        p1: PyObject,
        p2: PyObject,
        now: PyObject
    ) -> PyResult<PyBytes> {
        let now = Timestamp(now.extract(py)?);

        let mut inner = self.inner(py).borrow_mut();
        let parents = DirstateParents {
            p1: extract_node_id(py, &p1)?,
            p2: extract_node_id(py, &p2)?,
        };
        let result = inner.pack_v1(parents, now);
        match result {
            Ok(packed) => Ok(PyBytes::new(py, &packed)),
            Err(_) => Err(PyErr::new::<exc::OSError, _>(
                py,
                "Dirstate error".to_string(),
            )),
        }
    }

    /// Returns new data together with whether that data should be appended to
    /// the existing data file whose content is at `self.on_disk` (True),
    /// instead of written to a new data file (False).
    def write_v2(
        &self,
        now: PyObject,
        can_append: bool,
    ) -> PyResult<PyObject> {
        let now = Timestamp(now.extract(py)?);

        let mut inner = self.inner(py).borrow_mut();
        let result = inner.pack_v2(now, can_append);
        match result {
            Ok((packed, tree_metadata, append)) => {
                let packed = PyBytes::new(py, &packed);
                let tree_metadata = PyBytes::new(py, &tree_metadata);
                let tuple = (packed, tree_metadata, append);
                Ok(tuple.to_py_object(py).into_object())
            },
            Err(_) => Err(PyErr::new::<exc::OSError, _>(
                py,
                "Dirstate error".to_string(),
            )),
        }
    }

    def filefoldmapasdict(&self) -> PyResult<PyDict> {
        let dict = PyDict::new(py);
        for item in self.inner(py).borrow_mut().iter() {
            let (path, entry) = item.map_err(|e| v2_error(py, e))?;
            if entry.state() != EntryState::Removed {
                let key = normalize_case(path);
                let value = path;
                dict.set_item(
                    py,
                    PyBytes::new(py, key.as_bytes()).into_object(),
                    PyBytes::new(py, value.as_bytes()).into_object(),
                )?;
            }
        }
        Ok(dict)
    }

    def __len__(&self) -> PyResult<usize> {
        Ok(self.inner(py).borrow().len())
    }

    def __contains__(&self, key: PyObject) -> PyResult<bool> {
        let key = key.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow()
            .contains_key(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))
    }

    def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
        let key = key.extract::<PyBytes>(py)?;
        let key = HgPath::new(key.data(py));
        match self
            .inner(py)
            .borrow()
            .get(key)
            .map_err(|e| v2_error(py, e))?
        {
            Some(entry) => {
                Ok(make_dirstate_item(py, &entry)?)
            },
            None => Err(PyErr::new::<exc::KeyError, _>(
                py,
                String::from_utf8_lossy(key.as_bytes()),
            )),
        }
    }

    def keys(&self) -> PyResult<DirstateMapKeysIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        DirstateMapKeysIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.iter()) },
        )
    }

    def items(&self) -> PyResult<DirstateMapItemsIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        DirstateMapItemsIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.iter()) },
        )
    }

    def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        DirstateMapKeysIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.iter()) },
        )
    }

    // TODO all copymap* methods, see docstring above
    def copymapcopy(&self) -> PyResult<PyDict> {
        let dict = PyDict::new(py);
        for item in self.inner(py).borrow().copy_map_iter() {
            let (key, value) = item.map_err(|e| v2_error(py, e))?;
            dict.set_item(
                py,
                PyBytes::new(py, key.as_bytes()),
                PyBytes::new(py, value.as_bytes()),
            )?;
        }
        Ok(dict)
    }

    def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow()
            .copy_map_get(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
            None => Err(PyErr::new::<exc::KeyError, _>(
                py,
                String::from_utf8_lossy(key.data(py)),
            )),
        }
    }
    def copymap(&self) -> PyResult<CopyMap> {
        CopyMap::from_inner(py, self.clone_ref(py))
    }

    def copymaplen(&self) -> PyResult<usize> {
        Ok(self.inner(py).borrow().copy_map_len())
    }
    def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
        let key = key.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow()
            .copy_map_contains_key(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))
    }
    def copymapget(
        &self,
        key: PyObject,
        default: Option<PyObject>
    ) -> PyResult<Option<PyObject>> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow()
            .copy_map_get(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(copy) => Ok(Some(
                PyBytes::new(py, copy.as_bytes()).into_object(),
            )),
            None => Ok(default),
        }
    }
    def copymapsetitem(
        &self,
        key: PyObject,
        value: PyObject
    ) -> PyResult<PyObject> {
        let key = key.extract::<PyBytes>(py)?;
        let value = value.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow_mut()
            .copy_map_insert(
                HgPathBuf::from_bytes(key.data(py)),
                HgPathBuf::from_bytes(value.data(py)),
            )
            .map_err(|e| v2_error(py, e))?;
        Ok(py.None())
    }
    def copymappop(
        &self,
        key: PyObject,
        default: Option<PyObject>
    ) -> PyResult<Option<PyObject>> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow_mut()
            .copy_map_remove(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(_) => Ok(None),
            None => Ok(default),
        }
    }

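The `copymap*` methods above back a separate `CopyMap` proxy object, returned by `copymap()`, that records for each destination path the source it was copied or renamed from. The proxy's dict-like surface lives in the copymap bindings rather than in this file, so the exact calls in this sketch are assumptions:

```python
# Hedged sketch: assumes the CopyMap proxy forwards the usual dict protocol
# to copymapsetitem / copymapcontains / copymappop shown above.
cm = dmap.copymap()
cm[b'renamed.txt'] = b'original.txt'   # record a copy source
assert b'renamed.txt' in cm
cm.pop(b'renamed.txt', None)           # forget it again
```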
    def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        CopyMapKeysIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
        )
    }

    def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        CopyMapItemsIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
        )
    }

    def tracked_dirs(&self) -> PyResult<PyList> {
        let dirs = PyList::new(py, &[]);
        for path in self.inner(py).borrow_mut().iter_tracked_dirs()
            .map_err(|e| dirstate_error(py, e))?
        {
            let path = path.map_err(|e| v2_error(py, e))?;
            let path = PyBytes::new(py, path.as_bytes());
            dirs.append(py, path.into_object())
        }
        Ok(dirs)
    }

    def debug_iter(&self, all: bool) -> PyResult<PyList> {
        let dirs = PyList::new(py, &[]);
        for item in self.inner(py).borrow().debug_iter(all) {
            let (path, (state, mode, size, mtime)) =
                item.map_err(|e| v2_error(py, e))?;
            let path = PyBytes::new(py, path.as_bytes());
-            let item = make_dirstate_item_raw(py, state, mode, size, mtime)?;
-            dirs.append(py, (path, item).to_py_object(py).into_object())
+            let item = (path, state, mode, size, mtime);
+            dirs.append(py, item.to_py_object(py).into_object())
        }
        Ok(dirs)
    }
});

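This is the change the commit message describes: `debug_iter()` now yields plain `(path, state, mode, size, mtime)` tuples instead of constructing a `DirstateItem` per entry, which is cheaper and decouples the debug output from the `DirstateItem` API. A consumer on the Python side can unpack the tuples directly; `dmap` below is an assumed `DirstateMap` instance:

```python
# With the `all` flag set, the tree backend may also yield directory nodes
# that carry no v1-style entry of their own.
for path, state, mode, size, mtime in dmap.debug_iter(True):
    print(path, state, mode, size, mtime)
```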
impl DirstateMap {
    pub fn get_inner_mut<'a>(
        &'a self,
        py: Python<'a>,
    ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
        self.inner(py).borrow_mut()
    }
    fn translate_key(
        py: Python,
        res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
    ) -> PyResult<Option<PyBytes>> {
        let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
        Ok(Some(PyBytes::new(py, f.as_bytes())))
    }
    fn translate_key_value(
        py: Python,
        res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
    ) -> PyResult<Option<(PyBytes, PyObject)>> {
        let (f, entry) = res.map_err(|e| v2_error(py, e))?;
        Ok(Some((
            PyBytes::new(py, f.as_bytes()),
            make_dirstate_item(py, &entry)?,
        )))
    }
}

py_shared_iterator!(
    DirstateMapKeysIterator,
    UnsafePyLeaked<StateMapIter<'static>>,
    DirstateMap::translate_key,
    Option<PyBytes>
);

py_shared_iterator!(
    DirstateMapItemsIterator,
    UnsafePyLeaked<StateMapIter<'static>>,
    DirstateMap::translate_key_value,
    Option<(PyBytes, PyObject)>
);

fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
    let bytes = obj.extract::<PyBytes>(py)?;
    match bytes.data(py).try_into() {
        Ok(s) => Ok(s),
        Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
    }
}

pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
    PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
}

fn dirstate_error(py: Python<'_>, e: DirstateError) -> PyErr {
    PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
}