re2: fix reporting of availability in `hg debuginstall`...
marmoute
r51582:a45460e2 stable
@@ -1,4783 +1,4783 @@
# debugcommands.py - command processing for debug* commands
#
# Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import binascii
import codecs
import collections
import contextlib
import difflib
import errno
import glob
import operator
import os
import platform
import random
import re
import socket
import ssl
import stat
import subprocess
import sys
import time

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    short,
)
from .pycompat import (
    getattr,
    open,
)
from . import (
    bundle2,
    bundlerepo,
    changegroup,
    cmdutil,
    color,
    context,
    copies,
    dagparser,
    dirstateutils,
    encoding,
    error,
    exchange,
    extensions,
    filemerge,
    filesetlang,
    formatter,
    hg,
    httppeer,
    localrepo,
    lock as lockmod,
    logcmdutil,
    mergestate as mergestatemod,
    metadata,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pvec,
    pycompat,
    registrar,
    repair,
    repoview,
    requirements,
    revlog,
    revset,
    revsetlang,
    scmutil,
    setdiscovery,
    simplemerge,
    sshpeer,
    sslutil,
    streamclone,
    strip,
    tags as tagsmod,
    templater,
    treediscovery,
    upgrade,
    url as urlmod,
    util,
    verify,
    vfs as vfsmod,
    wireprotoframing,
    wireprotoserver,
)
from .interfaces import repository
from .utils import (
    cborutil,
    compression,
    dateutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    constants as revlog_constants,
    debug as revlog_debug,
    deltas as deltautil,
    nodemap,
    rewrite,
    sidedata,
)

release = lockmod.release

table = {}
table.update(strip.command._table)
command = registrar.command(table)

@command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        index, rev1, rev2 = args
        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        if not repo:
            raise error.Abort(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_(b'either two or three arguments required'))
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))

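# Editor's note (illustrative, not part of the upstream file): inside a
# repository, print the common ancestor of two revisions; the output follows
# the "%d:%s" (rev:hexnode) format written above. The revision numbers are
# hypothetical.
#
#   $ hg debugancestor 3 5
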
@command(b'debugantivirusrunning', [])
def debugantivirusrunning(ui, repo):
    """attempt to trigger an antivirus scanner to see if one is active"""
    with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
        f.write(
            util.b85decode(
                # This is a base85-armored version of the EICAR test file. See
                # https://en.wikipedia.org/wiki/EICAR_test_file for details.
                b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
                b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
            )
        )
    # Give an AV engine time to scan the file.
    time.sleep(2)
    util.unlink(repo.cachevfs.join('eicar-test-file.com'))

@command(b'debugapplystreamclonebundle', [], b'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.apply(repo)

@command(
    b'debugbuilddag',
    [
        (
            b'm',
            b'mergeable-file',
            None,
            _(b'add single file mergeable changes'),
        ),
        (
            b'o',
            b'overwritten-file',
            None,
            _(b'add single file all revs overwrite'),
        ),
        (b'n', b'new-file', None, _(b'add new file at each rev')),
        (
            b'',
            b'from-existing',
            None,
            _(b'continue from a non-empty repository'),
        ),
    ],
    _(b'[OPTION]... [TEXT]'),
)
def debugbuilddag(
    ui,
    repo,
    text=None,
    mergeable_file=False,
    overwritten_file=False,
    new_file=False,
    from_existing=False,
):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string-valued elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_(b"reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0 and not from_existing:
        raise error.Abort(_(b'repository is not empty'))

    # determine number of revs in DAG
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == b'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [b'%d' % i for i in range(0, total * linesperrev)]
        initialmergedlines.append(b"")

    tags = []
    progress = ui.makeprogress(
        _(b'building'), unit=_(b'revisions'), total=total
    )
    with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
        at = -1
        atbranch = b'default'
        nodeids = []
        id = 0
        progress.update(id)
        for type, data in dagparser.parsedag(text):
            if type == b'n':
                ui.note((b'node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = b"mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [
                            x[fn].data() for x in (pa, p1, p2)
                        ]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [
                            l.strip()
                            for l in simplemerge.render_minimized(m3)[0]
                        ]
                        ml.append(b"")
                    elif at > 0:
                        ml = p1[fn].data().split(b"\n")
                    else:
                        ml = initialmergedlines
                    ml[id * linesperrev] += b" r%i" % id
                    mergedtext = b"\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = b"of"
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id

                if new_file:
                    fn = b"nf%i" % id
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith(b"nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(
                            repo, cx, path, filecontent[path]
                        )
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(
                    repo,
                    pars,
                    b"r%i" % id,
                    files,
                    fctxfn,
                    date=(id, 0),
                    user=b"debugbuilddag",
                    extra={b'branch': atbranch},
                )
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == b'l':
                id, name = data
                ui.note((b'tag %s\n' % name))
                tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == b'a':
                ui.note((b'branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write(b"localtags", b"".join(tags))

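# Editor's note (illustrative, not part of the upstream file): a small DAG in
# the text language documented above. In a fresh empty repository, this builds
# three linear changesets, tags the last one "mainline", starts a second root,
# and merges it back into the tagged head. Repo name is hypothetical.
#
#   $ hg init dagdemo && cd dagdemo
#   $ hg debugbuilddag --new-file '+3:mainline $ +2 /mainline'
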
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    indent_string = b' ' * indent
    if all:
        ui.writenoi18n(
            b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
            % indent_string
        )

        def showchunks(named):
            ui.write(b"\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
                ui.write(
                    b"%s%s %s %s %s %s %d\n"
                    % (
                        indent_string,
                        hex(node),
                        hex(p1),
                        hex(p2),
                        hex(cs),
                        hex(deltabase),
                        len(delta),
                    )
                )

        gen.changelogheader()
        showchunks(b"changelog")
        gen.manifestheader()
        showchunks(b"manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata[b'filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_(b'use debugbundle2 for this file'))
        gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
            ui.write(b"%s%s\n" % (indent_string, hex(node)))


def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = b' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        msg = b"%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = b"%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        fm = ui.formatter(b'debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()


def _debugphaseheads(ui, data, indent=0):
    """display phase heads contained in 'data'"""
    indent_string = b' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write(indent_string)
            ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))


def _quasirepr(thing):
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        return b'{%s}' % (
            b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
        )
    return pycompat.bytestr(repr(thing))


def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_(b'not a bundle2 file'))
    ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
    parttypes = opts.get('part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = b'%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        if part.type == b'changegroup':
            version = part.params.get(b'version', b'01')
            cg = changegroup.getunbundler(version, part, b'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == b'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == b'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)


@command(
    b'debugbundle',
    [
        (b'a', b'all', None, _(b'show all details')),
        (b'', b'part-type', [], _(b'show only the named part type')),
        (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
    ],
    _(b'FILE'),
    norepo=True,
)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            spec = exchange.getbundlespec(ui, f)
            ui.write(b'%s\n' % spec)
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)

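# Editor's note (illustrative, not part of the upstream file; bundle path is
# hypothetical): list a bundle's changesets, restrict output to one bundle2
# part type, or print its bundlespec, using the flags declared above:
#
#   $ hg debugbundle ../changes.hg
#   $ hg debugbundle --all --part-type changegroup ../changes.hg
#   $ hg debugbundle --spec ../changes.hg
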
@command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    try:
        caps = peer.capabilities()
        ui.writenoi18n(b'Main capabilities:\n')
        for c in sorted(caps):
            ui.write(b'  %s\n' % c)
        b2caps = bundle2.bundle2caps(peer)
        if b2caps:
            ui.writenoi18n(b'Bundle2 capabilities:\n')
            for key, values in sorted(b2caps.items()):
                ui.write(b'  %s\n' % key)
                for v in values:
                    ui.write(b'    %s\n' % v)
    finally:
        peer.close()

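# Editor's note (illustrative, not part of the upstream file): query a remote
# peer's wire-protocol and bundle2 capability lists; any reachable repository
# URL works here:
#
#   $ hg debugcapabilities https://www.mercurial-scm.org/repo/hg
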
@command(
    b'debugchangedfiles',
    [
        (
            b'',
            b'compute',
            False,
            b"compute information instead of reading it from storage",
        ),
    ],
    b'REV',
)
def debugchangedfiles(ui, repo, rev, **opts):
    """list the stored file changes for a revision"""
    ctx = logcmdutil.revsingle(repo, rev, None)
    files = None

    if opts['compute']:
        files = metadata.compute_all_files_changes(ctx)
    else:
        sd = repo.changelog.sidedata(ctx.rev())
        files_block = sd.get(sidedata.SD_FILES)
        if files_block is not None:
            files = metadata.decode_files_sidedata(sd)
    if files is not None:
        for f in sorted(files.touched):
            if f in files.added:
                action = b"added"
            elif f in files.removed:
                action = b"removed"
            elif f in files.merged:
                action = b"merged"
            elif f in files.salvaged:
                action = b"salvaged"
            else:
                action = b"touched"

            copy_parent = b""
            copy_source = b""
            if f in files.copied_from_p1:
                copy_parent = b"p1"
                copy_source = files.copied_from_p1[f]
            elif f in files.copied_from_p2:
                copy_parent = b"p2"
                copy_source = files.copied_from_p2[f]

            data = (action, copy_parent, f, copy_source)
            template = b"%-8s %2s: %s, %s;\n"
            ui.write(template % data)

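# Editor's note (illustrative, not part of the upstream file): read the
# file-change record from changelog sidedata, or recompute it for comparison:
#
#   $ hg debugchangedfiles tip
#   $ hg debugchangedfiles --compute tip
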
@command(b'debugcheckstate', [], b'')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    errors = verify.verifier(repo)._verify_dirstate()
    if errors:
        errstr = _(b"dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)

@command(
    b'debugcolor',
    [(b'', b'style', None, _(b'show all configured styles'))],
    b'hg debugcolor',
)
def debugcolor(ui, repo, **opts):
    """show available colors, effects or styles"""
    ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
    if opts.get('style'):
        return _debugdisplaystyle(ui)
    else:
        return _debugdisplaycolor(ui)


def _debugdisplaycolor(ui):
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for k, v in ui.configitems(b'color'):
            if k.startswith(b'color.'):
                ui._styles[k] = k[6:]
            elif k.startswith(b'terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_(b'available colors:\n'))
    # sort labels with a '_' after the others to group the '_background'
    # entries together
    items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(b'%s\n' % colorname, label=label)


def _debugdisplaystyle(ui):
    ui.write(_(b'available style:\n'))
    if not ui._styles:
        return
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write(b'%s' % label, label=label)
        if effects:
            ui.write(b': ')
            ui.write(b' ' * (max(0, width - len(label))))
            ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
        ui.write(b'\n')

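# Editor's note (illustrative, not part of the upstream file): the first form
# lists raw color names, the second the configured styles:
#
#   $ hg debugcolor
#   $ hg debugcolor --style
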
@command(b'debugcreatestreamclonebundle', [], b'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(
            _(
                b'(warning: stream clone bundle will contain secret '
                b'revisions)\n'
            )
        )

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))

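# Editor's note (illustrative, not part of the upstream file; file and repo
# names are hypothetical): a round trip pairing this command with
# debugapplystreamclonebundle defined earlier:
#
#   $ hg debugcreatestreamclonebundle ../stream.hg
#   $ hg init ../copy && cd ../copy
#   $ hg debugapplystreamclonebundle ../stream.hg
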
@command(
    b'debugdag',
    [
        (b't', b'tags', None, _(b'use tags as labels')),
        (b'b', b'branches', None, _(b'annotate with branch names')),
        (b'', b'dots', None, _(b'use dots for runs')),
        (b's', b'spaces', None, _(b'separate elements by spaces')),
    ],
    _(b'[OPTION]... [FILE [REV]...]'),
    optionalrepo=True,
)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
        revs = {int(r) for r in revs}

        def events():
            for r in rlog:
                yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
                if r in revs:
                    yield b'l', (r, b"r%i" % r)

    elif repo:
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)

        def events():
            b = b"default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5][b'branch']
                    if newb != b:
                        yield b'a', newb
                        b = newb
                yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield b'l', (r, l)

    else:
        raise error.Abort(_(b'need repo for changelog dag'))

    for line in dagparser.dagtextlines(
        events(),
        addspaces=spaces,
        wraplabels=True,
        wrapannotations=True,
        wrapnonlinear=dots,
        usedots=dots,
        maxlinewidth=70,
    ):
        ui.write(line)
    ui.write(b"\n")

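# Editor's note (illustrative, not part of the upstream file): debugdag emits
# the same DAG text language that debugbuilddag consumes. With tag and branch
# annotations:
#
#   $ hg debugdag -t -b
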
@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    try:
        ui.write(r.rawdata(r.lookup(rev)))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)

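# Editor's note (illustrative, not part of the upstream file; file name and
# revision numbers are hypothetical): dump revision 0 of the changelog, or
# revision 2 of a file's filelog:
#
#   $ hg debugdata -c 0
#   $ hg debugdata README 2
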
@command(
    b'debugdate',
    [(b'e', b'extended', None, _(b'try extended date formats'))],
    _(b'[-e] DATE [RANGE]'),
    norepo=True,
    optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        d = dateutil.parsedate(date, dateutil.extendeddateformats)
    else:
        d = dateutil.parsedate(date)
    ui.writenoi18n(b"internal: %d %d\n" % d)
    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
    if range:
        m = dateutil.matchdate(range)
        ui.writenoi18n(b"match: %s\n" % m(d[0]))

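# Editor's note (illustrative, not part of the upstream file): prints the
# parsed date as the internal "(unixtime, offset)" pair and in standard form,
# and optionally tests it against a date range:
#
#   $ hg debugdate '2006-02-01 13:00:30' '>2005'
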
@command(
    b'debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``p1``: parent 1 revision number (for reference)
    :``p2``: parent 2 revision number (for reference)
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
                    - base: a full snapshot
                    - snap: an intermediate snapshot
                    - p1: a delta against the first parent
                    - p2: a delta against the second parent
                    - skip1: a delta against the same base as p1
                             (when p1 has an empty delta)
                    - skip2: a delta against the same base as p2
                             (when p2 has an empty delta)
                    - prev: a delta against the previous revision
                    - other: a delta against an arbitrary revision
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
                   (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r._generaldelta
    withsparseread = getattr(r, '_withsparseread', False)

    # security to avoid crash on corrupted revlogs
    total_revs = len(index)

    chain_size_cache = {}

    def revinfo(rev):
        e = index[rev]
        compsize = e[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH]
        uncompsize = e[revlog_constants.ENTRY_DATA_UNCOMPRESSED_LENGTH]

        base = e[revlog_constants.ENTRY_DELTA_BASE]
        p1 = e[revlog_constants.ENTRY_PARENT_1]
        p2 = e[revlog_constants.ENTRY_PARENT_2]

        # If a parent of a revision has an empty delta, we never try to delta
        # against that parent, but directly against the delta base of that
        # parent (recursively). It avoids adding a useless entry in the chain.
        #
        # However, we need to detect that as a special case for delta-type, so
        # that it is not simply reported as "other".
        p1_base = p1
        if p1 != nullrev and p1 < total_revs:
            e1 = index[p1]
            while e1[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH] == 0:
                new_base = e1[revlog_constants.ENTRY_DELTA_BASE]
                if (
                    new_base == p1_base
                    or new_base == nullrev
                    or new_base >= total_revs
                ):
                    break
                p1_base = new_base
                e1 = index[p1_base]
        p2_base = p2
        if p2 != nullrev and p2 < total_revs:
            e2 = index[p2]
            while e2[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH] == 0:
                new_base = e2[revlog_constants.ENTRY_DELTA_BASE]
                if (
                    new_base == p2_base
                    or new_base == nullrev
                    or new_base >= total_revs
                ):
                    break
                p2_base = new_base
                e2 = index[p2_base]

        if generaldelta:
            if base == p1:
                deltatype = b'p1'
            elif base == p2:
                deltatype = b'p2'
            elif base == rev:
                deltatype = b'base'
            elif base == p1_base:
                deltatype = b'skip1'
            elif base == p2_base:
                deltatype = b'skip2'
            elif r.issnapshot(rev):
                deltatype = b'snap'
            elif base == rev - 1:
                deltatype = b'prev'
            else:
                deltatype = b'other'
        else:
            if base == rev:
                deltatype = b'base'
            else:
                deltatype = b'prev'

        chain = r._deltachain(rev)[0]
        chain_size = 0
        for iter_rev in reversed(chain):
            cached = chain_size_cache.get(iter_rev)
            if cached is not None:
                chain_size += cached
                break
            e = index[iter_rev]
            chain_size += e[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH]
        chain_size_cache[rev] = chain_size

        return p1, p2, compsize, uncompsize, deltatype, chain, chain_size

    fm = ui.formatter(b'debugdeltachain', opts)

    fm.plain(
        b'    rev      p1      p2  chain# chainlen     prev   delta       '
        b'size    rawsize  chainsize     ratio   lindist extradist '
        b'extraratio'
    )
    if withsparseread:
        fm.plain(b'   readsize largestblk rddensity srchunks')
    fm.plain(b'\n')

    chainbases = {}
    for rev in r:
        p1, p2, comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write(
            b'rev p1 p2 chainid chainlen prevrev deltatype compsize '
            b'uncompsize chainsize chainratio lindist extradist '
            b'extraratio',
            b'%7d %7d %7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
            rev,
            p1,
            p2,
            chainid,
            len(chain),
            prevrev,
            deltatype,
            comp,
            uncomp,
            chainsize,
            chainratio,
            lineardist,
            extradist,
            extraratio,
            rev=rev,
            chainid=chainid,
            chainlen=len(chain),
            prevrev=prevrev,
            deltatype=deltatype,
            compsize=comp,
            uncompsize=uncomp,
            chainsize=chainsize,
            chainratio=chainratio,
            lindist=lineardist,
            extradist=extradist,
            extraratio=extraratio,
        )
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write(
                b'readsize largestblock readdensity srchunks',
                b' %10d %10d %9.5f %8d',
                readsize,
                largestblock,
                readdensity,
                srchunks,
                readsize=readsize,
                largestblock=largestblock,
                readdensity=readdensity,
                srchunks=srchunks,
            )

        fm.plain(b'\n')

    fm.end()

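# Editor's note (illustrative, not part of the upstream file): dump the
# manifest's delta chains, or select a few of the template keywords listed in
# the docstring:
#
#   $ hg debugdeltachain -m
#   $ hg debugdeltachain -c -T '{rev} {chainid} {chainlen} {deltatype}\n'
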
991 @command(
991 @command(
992 b'debug-delta-find',
992 b'debug-delta-find',
993 cmdutil.debugrevlogopts
993 cmdutil.debugrevlogopts
994 + cmdutil.formatteropts
994 + cmdutil.formatteropts
995 + [
995 + [
996 (
996 (
997 b'',
997 b'',
998 b'source',
998 b'source',
999 b'full',
999 b'full',
1000 _(b'input data feed to the process (full, storage, p1, p2, prev)'),
1000 _(b'input data feed to the process (full, storage, p1, p2, prev)'),
1001 ),
1001 ),
1002 ],
1002 ],
1003 _(b'-c|-m|FILE REV'),
1003 _(b'-c|-m|FILE REV'),
1004 optionalrepo=True,
1004 optionalrepo=True,
1005 )
1005 )
1006 def debugdeltafind(ui, repo, arg_1, arg_2=None, source=b'full', **opts):
1006 def debugdeltafind(ui, repo, arg_1, arg_2=None, source=b'full', **opts):
1007 """display the computation to get to a valid delta for storing REV
1007 """display the computation to get to a valid delta for storing REV
1008
1008
1009 This command will replay the process used to find the "best" delta to store
1009 This command will replay the process used to find the "best" delta to store
1010 a revision and display information about all the steps used to get to that
1010 a revision and display information about all the steps used to get to that
1011 result.
1011 result.
1012
1012
1013 By default, the process is fed with a the full-text for the revision. This
1013 By default, the process is fed with a the full-text for the revision. This
1014 can be controlled with the --source flag.
1014 can be controlled with the --source flag.

    The revision is identified by the revision number of the target storage
    (not the changelog revision number).

    note: the process is initiated from a full text of the revision to store.
    """
    opts = pycompat.byteskwargs(opts)
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
    p1r, p2r = revlog.parentrevs(rev)

    if source == b'full':
        base_rev = nullrev
    elif source == b'storage':
        base_rev = revlog.deltaparent(rev)
    elif source == b'p1':
        base_rev = p1r
    elif source == b'p2':
        base_rev = p2r
    elif source == b'prev':
        base_rev = rev - 1
    else:
        raise error.InputError(b"invalid --source value: %s" % source)

    revlog_debug.debug_delta_find(ui, revlog, rev, base_rev=base_rev)


@command(
    b'debugdirstate|debugstate',
    [
        (
            b'',
            b'nodates',
            None,
            _(b'do not display the saved mtime (DEPRECATED)'),
        ),
        (b'', b'dates', True, _(b'display the saved mtime')),
        (b'', b'datesort', None, _(b'sort by saved mtime')),
        (
            b'',
            b'docket',
            False,
            _(b'display the docket (metadata file) instead'),
        ),
        (
            b'',
            b'all',
            False,
            _(b'display dirstate-v2 tree nodes that would not exist in v1'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    if opts.get("docket"):
        if not repo.dirstate._use_dirstate_v2:
            raise error.Abort(_(b'dirstate v1 does not have a docket'))

        docket = repo.dirstate._map.docket
        (
            start_offset,
            root_nodes,
            nodes_with_entry,
            nodes_with_copy,
            unused_bytes,
            _unused,
            ignore_pattern,
        ) = dirstateutils.v2.TREE_METADATA.unpack(docket.tree_metadata)

        ui.write(_(b"size of dirstate data: %d\n") % docket.data_size)
        ui.write(_(b"data file uuid: %s\n") % docket.uuid)
        ui.write(_(b"start offset of root nodes: %d\n") % start_offset)
        ui.write(_(b"number of root nodes: %d\n") % root_nodes)
        ui.write(_(b"nodes with entries: %d\n") % nodes_with_entry)
        ui.write(_(b"nodes with copies: %d\n") % nodes_with_copy)
        ui.write(_(b"number of unused bytes: %d\n") % unused_bytes)
        ui.write(
            _(b"ignore pattern hash: %s\n") % binascii.hexlify(ignore_pattern)
        )
        return

    nodates = not opts['dates']
    if opts.get('nodates') is not None:
        nodates = True
    datesort = opts.get('datesort')

    if datesort:

        def keyfunc(entry):
            filename, _state, _mode, _size, mtime = entry
            return (mtime, filename)

    else:
        keyfunc = None  # sort by filename
    entries = list(repo.dirstate._map.debug_iter(all=opts['all']))
    entries.sort(key=keyfunc)
    for entry in entries:
        filename, state, mode, size, mtime = entry
        if mtime == -1:
            timestr = b'unset               '
        elif nodates:
            timestr = b'set                 '
        else:
            timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime))
            timestr = encoding.strtolocal(timestr)
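        # mode is the raw st_mode; 0o20000 is the bit that distinguishes
        # S_IFLNK (0o120000) from S_IFREG (0o100000) in the stored value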
        if mode & 0o20000:
            mode = b'lnk'
        else:
            mode = b'%3o' % (mode & 0o777 & ~util.umask)
        ui.write(b"%c %s %10d %s%s\n" % (state, mode, size, timestr, filename))
    for f in repo.dirstate.copies():
        ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))


@command(
    b'debugdirstateignorepatternshash',
    [],
    _(b''),
)
def debugdirstateignorepatternshash(ui, repo, **opts):
    """show the hash of ignore patterns stored in dirstate if v2,
    or nothing for dirstate-v1
    """
    if repo.dirstate._use_dirstate_v2:
        docket = repo.dirstate._map.docket
        hash_len = 20  # 160 bits for SHA-1
        hash_bytes = docket.tree_metadata[-hash_len:]
        ui.write(binascii.hexlify(hash_bytes) + b'\n')


@command(
    b'debugdiscovery',
    [
        (b'', b'old', None, _(b'use old-style discovery')),
        (
            b'',
            b'nonheads',
            None,
            _(b'use old-style discovery with non-heads included'),
        ),
        (b'', b'rev', [], b'restrict discovery to this set of revs'),
        (b'', b'seed', b'12323', b'specify the random seed used for discovery'),
        (
            b'',
            b'local-as-revs',
            b"",
            b'treat local as having these revisions only',
        ),
        (
            b'',
            b'remote-as-revs',
            b"",
            b'use local as remote, with only these revisions',
        ),
    ]
    + cmdutil.remoteopts
    + cmdutil.formatteropts,
    _(b'[--rev REV] [OTHER]'),
)
def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
    """runs the changeset discovery protocol in isolation

    The local peer can be "replaced" by a subset of the local repository by
    using the `--local-as-revs` flag. In the same way, the usual `remote` peer
    can be "replaced" by a subset of the local repository using the
    `--remote-as-revs` flag. This is useful to efficiently debug pathological
    discovery situations.
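
    For example (hypothetical revset), to replay discovery as if the remote
    only had the first hundred revisions:

      $ hg debugdiscovery --remote-as-revs '0:99'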

    The following developer-oriented configs are relevant for people playing
    with this command:

    * devel.discovery.exchange-heads=True

      If False, the discovery will not start with
      remote head fetching and local head querying.

    * devel.discovery.grow-sample=True

      If False, the sample size used in set discovery will not be increased
      through the process.

    * devel.discovery.grow-sample.dynamic=True

      When discovery.grow-sample.dynamic is True, the default, the sample size
      is adapted to the shape of the undecided set (it is set to the max of:
      <target-size>, len(roots(undecided)), len(heads(undecided))).

    * devel.discovery.grow-sample.rate=1.05

      the rate at which the sample grows

    * devel.discovery.randomize=True

      If False, random sampling during discovery is deterministic. It is
      meant for integration tests.

    * devel.discovery.sample-size=200

      Control the initial size of the discovery sample

    * devel.discovery.sample-size.initial=100

      Control the size of the sample used for the first round of discovery
    """
    opts = pycompat.byteskwargs(opts)
    unfi = repo.unfiltered()

    # setup potential extra filtering
    local_revs = opts[b"local_as_revs"]
    remote_revs = opts[b"remote_as_revs"]

    # make sure tests are repeatable
    random.seed(int(opts[b'seed']))

    if not remote_revs:
        path = urlutil.get_unique_pull_path_obj(
            b'debugdiscovery', ui, remoteurl
        )
        branches = (path.branch, [])
        remote = hg.peer(repo, opts, path)
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
    else:
        branches = (None, [])
        remote_filtered_revs = logcmdutil.revrange(
            unfi, [b"not (::(%s))" % remote_revs]
        )
        remote_filtered_revs = frozenset(remote_filtered_revs)

        def remote_func(x):
            return remote_filtered_revs

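        # registering a throwaway repoview filter lets the local repository
        # masquerade as a remote that is missing the filtered revisions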
        repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func

        remote = repo.peer()
        remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')

    if local_revs:
        local_filtered_revs = logcmdutil.revrange(
            unfi, [b"not (::(%s))" % local_revs]
        )
        local_filtered_revs = frozenset(local_filtered_revs)

        def local_func(x):
            return local_filtered_revs

        repoview.filtertable[b'debug-discovery-local-filter'] = local_func
        repo = repo.filtered(b'debug-discovery-local-filter')

    data = {}
    if opts.get(b'old'):

        def doit(pushedrevs, remoteheads, remote=remote):
            if not util.safehasattr(remote, b'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            if remote_revs:
                r = remote._repo.filtered(b'debug-discovery-remote-filter')
                remote._repo = r
            common, _in, hds = treediscovery.findcommonincoming(
                repo, remote, force=True, audit=data
            )
            common = set(common)
            if not opts.get(b'nonheads'):
                ui.writenoi18n(
                    b"unpruned common: %s\n"
                    % b" ".join(sorted(short(n) for n in common))
                )

                clnode = repo.changelog.node
                common = repo.revs(b'heads(::%ln)', common)
                common = {clnode(r) for r in common}
            return common, hds

    else:

        def doit(pushedrevs, remoteheads, remote=remote):
            nodes = None
            if pushedrevs:
                revs = logcmdutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(
                ui,
                repo,
                remote,
                ancestorsof=nodes,
                audit=data,
                abortwhenunrelated=False,
            )
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts[b'rev']

    fm = ui.formatter(b'debugdiscovery', opts)
    if fm.strict_format:

        @contextlib.contextmanager
        def may_capture_output():
            ui.pushbuffer()
            yield
            data[b'output'] = ui.popbuffer()

    else:
        may_capture_output = util.nullcontextmanager
    with may_capture_output():
        with util.timedcm('debug-discovery') as t:
            common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    if len(common) == 1 and repo.nullid in common:
        common = set()
    heads_common = set(common)
    heads_remote = set(hds)
    heads_local = set(repo.heads())
    # note: there cannot be a local or remote head that is in common and not
    # itself a head of common.
    heads_common_local = heads_common & heads_local
    heads_common_remote = heads_common & heads_remote
    heads_common_both = heads_common & heads_remote & heads_local

    all = repo.revs(b'all()')
    common = repo.revs(b'::%ln', common)
    roots_common = repo.revs(b'roots(::%ld)', common)
    missing = repo.revs(b'not ::%ld', common)
    heads_missing = repo.revs(b'heads(%ld)', missing)
    roots_missing = repo.revs(b'roots(%ld)', missing)
    assert len(common) + len(missing) == len(all)

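    # the initial undecided set: revisions that the first round of head
    # exchange classifies neither as ancestors of a common remote head nor
    # as descendants of a common local head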
    initial_undecided = repo.revs(
        b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
    )
    heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
    roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
    common_initial_undecided = initial_undecided & common
    missing_initial_undecided = initial_undecided & missing

    data[b'elapsed'] = t.elapsed
    data[b'nb-common-heads'] = len(heads_common)
    data[b'nb-common-heads-local'] = len(heads_common_local)
    data[b'nb-common-heads-remote'] = len(heads_common_remote)
    data[b'nb-common-heads-both'] = len(heads_common_both)
    data[b'nb-common-roots'] = len(roots_common)
    data[b'nb-head-local'] = len(heads_local)
    data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
    data[b'nb-head-remote'] = len(heads_remote)
    data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
        heads_common_remote
    )
    data[b'nb-revs'] = len(all)
    data[b'nb-revs-common'] = len(common)
    data[b'nb-revs-missing'] = len(missing)
    data[b'nb-missing-heads'] = len(heads_missing)
    data[b'nb-missing-roots'] = len(roots_missing)
    data[b'nb-ini_und'] = len(initial_undecided)
    data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
    data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
    data[b'nb-ini_und-common'] = len(common_initial_undecided)
    data[b'nb-ini_und-missing'] = len(missing_initial_undecided)

    fm.startitem()
    fm.data(**pycompat.strkwargs(data))
    # display discovery summary
    fm.plain(b"elapsed time:  %(elapsed)f seconds\n" % data)
    fm.plain(b"round-trips:           %(total-roundtrips)9d\n" % data)
    if b'total-round-trips-heads' in data:
        fm.plain(
            b"  round-trips-heads:    %(total-round-trips-heads)9d\n" % data
        )
    if b'total-round-trips-branches' in data:
        fm.plain(
            b"  round-trips-branches: %(total-round-trips-branches)9d\n"
            % data
        )
    if b'total-round-trips-between' in data:
        fm.plain(
            b"  round-trips-between:  %(total-round-trips-between)9d\n" % data
        )
    fm.plain(b"queries:               %(total-queries)9d\n" % data)
    if b'total-queries-branches' in data:
        fm.plain(b"  queries-branches:    %(total-queries-branches)9d\n" % data)
    if b'total-queries-between' in data:
        fm.plain(b"  queries-between:     %(total-queries-between)9d\n" % data)
    fm.plain(b"heads summary:\n")
    fm.plain(b"  total common heads:  %(nb-common-heads)9d\n" % data)
    fm.plain(b"    also local heads:  %(nb-common-heads-local)9d\n" % data)
    fm.plain(b"    also remote heads: %(nb-common-heads-remote)9d\n" % data)
    fm.plain(b"    both:              %(nb-common-heads-both)9d\n" % data)
    fm.plain(b"  local heads:         %(nb-head-local)9d\n" % data)
    fm.plain(b"    common:            %(nb-common-heads-local)9d\n" % data)
    fm.plain(b"    missing:           %(nb-head-local-missing)9d\n" % data)
    fm.plain(b"  remote heads:        %(nb-head-remote)9d\n" % data)
    fm.plain(b"    common:            %(nb-common-heads-remote)9d\n" % data)
    fm.plain(b"    unknown:           %(nb-head-remote-unknown)9d\n" % data)
    fm.plain(b"local changesets:      %(nb-revs)9d\n" % data)
    fm.plain(b"  common:              %(nb-revs-common)9d\n" % data)
    fm.plain(b"    heads:             %(nb-common-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-common-roots)9d\n" % data)
    fm.plain(b"  missing:             %(nb-revs-missing)9d\n" % data)
    fm.plain(b"    heads:             %(nb-missing-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-missing-roots)9d\n" % data)
    fm.plain(b"  first undecided set: %(nb-ini_und)9d\n" % data)
    fm.plain(b"    heads:             %(nb-ini_und-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-ini_und-roots)9d\n" % data)
    fm.plain(b"    common:            %(nb-ini_und-common)9d\n" % data)
    fm.plain(b"    missing:           %(nb-ini_und-missing)9d\n" % data)

    if ui.verbose:
        fm.plain(
            b"common heads: %s\n"
            % b" ".join(sorted(short(n) for n in heads_common))
        )
    fm.end()


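# 4 KiB buffer size used when streaming a downloaded resource to disk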
_chunksize = 4 << 10


@command(
    b'debugdownload',
    [
        (b'o', b'output', b'', _(b'path')),
    ],
    optionalrepo=True,
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config"""
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        dest = open(output, b"wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        if output:
            dest.close()


@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = None

        if util.safehasattr(extmod, '__file__'):
            extsource = pycompat.fsencode(extmod.__file__)
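        # frozen PyOxidizer builds have no __file__; report the embedding
        # executable instead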
        elif getattr(sys, 'oxidized', False):
            extsource = pycompat.sysexecutable
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', b'').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', extname)
        else:
            fm.write(b'name', b'%s', extname)
        if isinternal or hgver in exttestedwith:
            fm.plain(b'\n')
        elif not exttestedwith:
            fm.plain(_(b' (untested!)\n'))
        else:
            lasttestedversion = exttestedwith[-1]
            fm.plain(b' (%s!)\n' % lasttestedversion)

        fm.condwrite(
            ui.verbose and extsource,
            b'source',
            _(b'  location: %s\n'),
            extsource or b"",
        )

        if ui.verbose:
            fm.plain(_(b'  bundled: %s\n') % [b'no', b'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(
            ui.verbose and exttestedwith,
            b'testedwith',
            _(b'  tested with: %s\n'),
            fm.formatlist(exttestedwith, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and extbuglink,
            b'buglink',
            _(b'  bug reporting: %s\n'),
            extbuglink or b"",
        )

    fm.end()


@command(
    b'debugfileset',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'apply the filespec on this revision'),
            _(b'REV'),
        ),
        (
            b'',
            b'all-files',
            False,
            _(b'test files from all revisions and working directory'),
        ),
        (
            b's',
            b'show-matcher',
            None,
            _(b'print internal representation of matcher'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
    ],
    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset

    fileset.symbols  # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'), None)

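    # each stage transforms the parse tree in sequence; with --show-stage the
    # tree can be dumped after any (or all) of these steps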
    stages = [
        (b'parsed', pycompat.identity),
        (b'analyzed', filesetlang.analyze),
        (b'optimized', filesetlang.optimize),
    ]
    stagenames = {n for n, f in stages}

    showalways = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(filesetlang.prettyformat(tree), b"\n")

    files = set()
    if opts[b'all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts[b'all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(
            repo.dirstate.walk(
                scmutil.matchall(repo),
                subrepos=list(wctx.substate),
                unknown=True,
                ignored=True,
            )
        )
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(repo.getcwd(), expr)
    if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write(b"%s\n" % f)


@command(
    b"debug-repair-issue6528",
    [
        (
            b'',
            b'to-report',
            b'',
            _(b'build a report of affected revisions to this file'),
            _(b'FILE'),
        ),
        (
            b'',
            b'from-report',
            b'',
            _(b'repair revisions listed in this report file'),
            _(b'FILE'),
        ),
        (
            b'',
            b'paranoid',
            False,
            _(b'check that both detection methods do the same thing'),
        ),
    ]
    + cmdutil.dryrunopts,
)
def debug_repair_issue6528(ui, repo, **opts):
    """find affected revisions and repair them. See issue6528 for more details.

    The `--to-report` and `--from-report` flags allow you to cache and reuse the
    computation of affected revisions for a given repository across clones.
    The report format is line-based (with empty lines ignored):

    ```
    <ascii-hex of the affected revision>,... <unencoded filelog index filename>
    ```

    There can be multiple broken revisions per filelog; they are separated by
    a comma with no spaces. The only space is between the revision(s) and the
    filename.
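
    For instance, a line of the report could look like this (placeholders,
    not real values):

    ```
    <40-hex-node>,<40-hex-node> data/some/file.txt.i
    ```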

    Note that this does *not* mean that this repairs future affected revisions;
    that needs a separate fix at the exchange level, introduced in
    Mercurial 5.9.1.

    There is a `--paranoid` flag to test that the fast implementation is correct
    by checking it against the slow implementation. Since this matter is quite
    urgent and testing every edge-case is probably quite costly, we use this
    method to test on large repositories as a fuzzing method of sorts.
    """
    cmdutil.check_incompatible_arguments(
        opts, 'to_report', ['from_report', 'dry_run']
    )
    dry_run = opts.get('dry_run')
    to_report = opts.get('to_report')
    from_report = opts.get('from_report')
    paranoid = opts.get('paranoid')
    # TODO maybe add filelog pattern and revision pattern parameters to help
    # narrow down the search for users that know what they're looking for?

    if requirements.REVLOGV1_REQUIREMENT not in repo.requirements:
        msg = b"can only repair revlogv1 repositories, v2 is not affected"
        raise error.Abort(_(msg))

    rewrite.repair_issue6528(
        ui,
        repo,
        dry_run=dry_run,
        to_report=to_report,
        from_report=from_report,
        paranoid=paranoid,
    )


@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about the current config value and
    the Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len(b'format-variant'), maxvariantlength)

    def makeformatname(name):
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))

    fm = ui.formatter(b'debugformat', opts)
    if fm.isplain():

        def formatvalue(value):
            if util.safehasattr(value, b'startswith'):
                return value
            if value:
                return b'yes'
            else:
                return b'no'

    else:
        formatvalue = pycompat.identity

    fm.plain(b'format-variant')
    fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
    fm.plain(b' repo')
    if ui.verbose:
        fm.plain(b' config default')
    fm.plain(b'\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

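        # pick highlight labels: a repo value that disagrees with the current
        # config is flagged more prominently than one that merely differs
        # from the Mercurial default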
        if repovalue != configvalue:
            namelabel = b'formatvariant.name.mismatchconfig'
            repolabel = b'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = b'formatvariant.name.mismatchdefault'
            repolabel = b'formatvariant.repo.mismatchdefault'
        else:
            namelabel = b'formatvariant.name.uptodate'
            repolabel = b'formatvariant.repo.uptodate'

        fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
        fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
        if fv.default != configvalue:
            configlabel = b'formatvariant.config.special'
        else:
            configlabel = b'formatvariant.config.default'
        fm.condwrite(
            ui.verbose,
            b'config',
            b' %6s',
            formatvalue(configvalue),
            label=configlabel,
        )
        fm.condwrite(
            ui.verbose,
            b'default',
            b' %7s',
            formatvalue(fv.default),
            label=b'formatvariant.default',
        )
        fm.plain(b'\n')
    fm.end()


@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""
    ui.writenoi18n(b'path: %s\n' % path)
    ui.writenoi18n(
        b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
    )
    ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
    ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
    ui.writenoi18n(
        b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
    )
    ui.writenoi18n(
        b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
    )
    casesensitive = b'(unknown)'
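    # probe case sensitivity by creating a temporary file and checking
    # whether a differently-cased variant of its name resolves to it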
    try:
        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
    except OSError:
        pass
    ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)


@command(
    b'debuggetbundle',
    [
        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
        (
            b't',
            b'type',
            b'bzip2',
            _(b'bundle compression type to use'),
            _(b'TYPE'),
        ),
    ],
    _(b'REPO FILE [-H|-C ID]...'),
    norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
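
    For example (hypothetical URL and node id):

      $ hg debuggetbundle https://example.com/repo out.hg -H <40-hex-node>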
1816 """
1816 """
1817 opts = pycompat.byteskwargs(opts)
1817 opts = pycompat.byteskwargs(opts)
1818 repo = hg.peer(ui, opts, repopath)
1818 repo = hg.peer(ui, opts, repopath)
1819 if not repo.capable(b'getbundle'):
1819 if not repo.capable(b'getbundle'):
1820 raise error.Abort(b"getbundle() not supported by target repository")
1820 raise error.Abort(b"getbundle() not supported by target repository")
1821 args = {}
1821 args = {}
1822 if common:
1822 if common:
1823 args['common'] = [bin(s) for s in common]
1823 args['common'] = [bin(s) for s in common]
1824 if head:
1824 if head:
1825 args['heads'] = [bin(s) for s in head]
1825 args['heads'] = [bin(s) for s in head]
1826 # TODO: get desired bundlecaps from command line.
1826 # TODO: get desired bundlecaps from command line.
1827 args['bundlecaps'] = None
1827 args['bundlecaps'] = None
1828 bundle = repo.getbundle(b'debug', **args)
1828 bundle = repo.getbundle(b'debug', **args)
1829
1829
1830 bundletype = opts.get(b'type', b'bzip2').lower()
1830 bundletype = opts.get(b'type', b'bzip2').lower()
1831 btypes = {
1831 btypes = {
1832 b'none': b'HG10UN',
1832 b'none': b'HG10UN',
1833 b'bzip2': b'HG10BZ',
1833 b'bzip2': b'HG10BZ',
1834 b'gzip': b'HG10GZ',
1834 b'gzip': b'HG10GZ',
1835 b'bundle2': b'HG20',
1835 b'bundle2': b'HG20',
1836 }
1836 }
1837 bundletype = btypes.get(bundletype)
1837 bundletype = btypes.get(bundletype)
1838 if bundletype not in bundle2.bundletypes:
1838 if bundletype not in bundle2.bundletypes:
1839 raise error.Abort(_(b'unknown bundle type specified with --type'))
1839 raise error.Abort(_(b'unknown bundle type specified with --type'))
1840 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1840 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1841
1841
1842
1842
@command(b'debugignore', [], b'[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space-separated file names, shows whether each file is ignored and,
    if so, shows the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write(b"%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != b'.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
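                    # the file itself is not ignored; check whether one of
                    # its parent directories is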
                    for p in pathutil.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_(b"%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(
                        _(
                            b"%s is ignored because of "
                            b"containing directory %s\n"
                        )
                        % (uipathfn(f), ignored)
                    )
                ignorefile, lineno, line = ignoredata
                ui.write(
                    _(b"(ignore rule in %s, line %d: '%s')\n")
                    % (ignorefile, lineno, line)
                )
            else:
                ui.write(_(b"%s is not ignored\n") % uipathfn(f))


@command(
    b'debug-revlog-index|debugindex',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a revlog"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindex', file_, opts)

    fm = ui.formatter(b'debugindex', opts)

    revlog = getattr(store, b'_revlog', store)

    return revlog_debug.debug_index(
        ui,
        repo,
        formatter=fm,
        revlog=revlog,
        full_node=ui.debugflag,
    )


@command(
    b'debugindexdot',
    cmdutil.debugrevlogopts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
    ui.writenoi18n(b"digraph G {\n")
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != repo.nullid:
            ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write(b"}\n")


1936 @command(b'debugindexstats', [])
1936 @command(b'debugindexstats', [])
1937 def debugindexstats(ui, repo):
1937 def debugindexstats(ui, repo):
1938 """show stats related to the changelog index"""
1938 """show stats related to the changelog index"""
1939 repo.changelog.shortest(repo.nullid, 1)
1939 repo.changelog.shortest(repo.nullid, 1)
1940 index = repo.changelog.index
1940 index = repo.changelog.index
1941 if not util.safehasattr(index, b'stats'):
1941 if not util.safehasattr(index, b'stats'):
1942 raise error.Abort(_(b'debugindexstats only works with native code'))
1942 raise error.Abort(_(b'debugindexstats only works with native code'))
1943 for k, v in sorted(index.stats().items()):
1943 for k, v in sorted(index.stats().items()):
1944 ui.write(b'%s: %d\n' % (k, v))
1944 ui.write(b'%s: %d\n' % (k, v))
1945
1945
1946
1946
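# Illustrative usage note for `debuginstall` below: each probe prints a
# "checking ..." line, and the number of problems found is returned as the
# exit status; with the formatter options, e.g. `hg debuginstall -Tjson`,
# the same results are emitted in machine-readable form.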
@command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
def debuginstall(ui, **opts):
    """test Mercurial installation

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)

    problems = 0

    fm = ui.formatter(b'debuginstall', opts)
    fm.startitem()

    # encoding might be unknown or wrong. don't translate these messages.
    fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(
        err,
        b'encodingerror',
        b" %s\n (check that your locale is properly set)\n",
        err,
    )

    # Python
    pythonlib = None
    if util.safehasattr(os, '__file__'):
        pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
    elif getattr(sys, 'oxidized', False):
        pythonlib = pycompat.sysexecutable

    fm.write(
        b'pythonexe',
        _(b"checking Python executable (%s)\n"),
        pycompat.sysexecutable or _(b"unknown"),
    )
    fm.write(
        b'pythonimplementation',
        _(b"checking Python implementation (%s)\n"),
        pycompat.sysbytes(platform.python_implementation()),
    )
    fm.write(
        b'pythonver',
        _(b"checking Python version (%s)\n"),
        (b"%d.%d.%d" % sys.version_info[:3]),
    )
    fm.write(
        b'pythonlib',
        _(b"checking Python lib (%s)...\n"),
        pythonlib or _(b"unknown"),
    )

    try:
        from . import rustext  # pytype: disable=import-error

        rustext.__doc__  # trigger lazy import
    except ImportError:
        rustext = None

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add(b'sni')

    fm.write(
        b'pythonsecurity',
        _(b"checking Python security support (%s)\n"),
        fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
    )

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if b'tls1.2' not in security:
        fm.plain(
            _(
                b' TLS 1.2 not supported by Python install; '
                b'network connections lack modern security\n'
            )
        )
    if b'sni' not in security:
        fm.plain(
            _(
                b' SNI not supported by Python install; may have '
                b'connectivity issues with some servers\n'
            )
        )

    fm.plain(
        _(
            b"checking Rust extensions (%s)\n"
            % (b'missing' if rustext is None else b'installed')
        ),
    )

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write(
        b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
    )
    fm.write(
        b'hgverextra',
        _(b"checking Mercurial custom build (%s)\n"),
        b'+'.join(hgver.split(b'+')[1:]),
    )

    # compiled modules
    hgmodules = None
    if util.safehasattr(sys.modules[__name__], '__file__'):
        hgmodules = os.path.dirname(pycompat.fsencode(__file__))
    elif getattr(sys, 'oxidized', False):
        hgmodules = pycompat.sysexecutable

    fm.write(
        b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
    )
    fm.write(
        b'hgmodules',
        _(b"checking installed modules (%s)...\n"),
        hgmodules or _(b"unknown"),
    )

    rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
    rustext = rustandc  # for now, that's the only case
    cext = policy.policy in (b'c', b'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        err = None
        try:
            if cext:
                from .cext import (  # pytype: disable=import-error
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )

                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (  # pytype: disable=import-error
                    ancestor,
                    dirstate,
                )

                dir(ancestor), dir(dirstate)  # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, b'extensionserror', b" %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write(
        b'compengines',
        _(b'checking registered compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    fm.write(
        b'compenginesavail',
        _(b'checking available compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines if e.available()),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE
    )
    fm.write(
        b'compenginesserver',
        _(
            b'checking available compression engines '
            b'for wire protocol (%s)\n'
        ),
        fm.formatlist(
            [e.name() for e in wirecompengines if e.wireprotosupport()],
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
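    # Note: util._re2 is initialized lazily (it stays None until detection has
    # actually run), so probing the attribute directly could report re2 as
    # missing before any regexp had been compiled; has_re2() forces the
    # detection to run first and fixes the reporting.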
    re2 = b'missing'
    if util.has_re2():
        re2 = b'available'
    fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatedir()
    fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
    fm.condwrite(not p, b'', _(b" no template directories found\n"))
    if p:
        (m, fp) = templater.try_open_template(b"map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
        else:
            p = None
        fm.condwrite(
            p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
        )
        fm.condwrite(
            not m,
            b'defaulttemplatenotfound',
            _(b" template '%s' not found\n"),
            b"default",
        )
    if not p:
        problems += 1
    fm.condwrite(
        not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
    )

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    fm.condwrite(
        not cmdpath and editor == b'vi',
        b'vinotfound',
        _(
            b" No commit editor set and can't find %s in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editor == b'vi' and editorbin,
    )
    fm.condwrite(
        not cmdpath and editor != b'vi',
        b'editornotfound',
        _(
            b" Can't find editor '%s' in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editorbin,
    )
    if not cmdpath and editor != b'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e.message
        problems += 1

    fm.condwrite(
        username, b'username', _(b"checking username (%s)\n"), username
    )
    fm.condwrite(
        err,
        b'usernameerror',
        _(
            b"checking username...\n %s\n"
            b" (specify a username in your configuration file)\n"
        ),
        err,
    )

    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, b'', _(b"no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(
        problems,
        b'problems',
        _(b"%d problems detected, please check your install!\n"),
        problems,
    )
    fm.end()

    return problems


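# Illustrative usage note for `debugknown` below (the node is hypothetical):
# `hg debugknown path/to/repo cb9a9f314b8b07ba71012fcdbc544255a4a86996`
# prints one digit per ID, e.g. `1` if the node is known to the peer and `0`
# otherwise.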
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))


@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)


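# Illustrative output note for `debuglocks` below: one line per lock, e.g.
# `lock:  free` when nothing holds the store lock, or
# `wlock: user alice, process 4321 (12s)` (user, process id, and age are
# hypothetical) when the working-state lock is held.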
@command(
    b'debuglocks',
    [
        (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-free-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    if opts.get('force_free_lock'):
        repo.svfs.tryunlink(b'lock')
    if opts.get('force_free_wlock'):
        repo.vfs.tryunlink(b'wlock')
    if opts.get('force_free_lock') or opts.get('force_free_wlock'):
        return 0

    locks = []
    try:
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            try:
                if ui.interactive():
                    prompt = _(b"ready to release the lock (y)? $$ &Yes")
                    ui.promptchoice(prompt)
                else:
                    msg = b"%d locks held, waiting for signal\n"
                    msg %= len(locks)
                    ui.status(msg)
                    while True:  # XXX wait for a signal
                        time.sleep(0.1)
            except KeyboardInterrupt:
                msg = b"signal-received releasing locks\n"
                ui.status(msg)
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if b":" in locker:
                    host, pid = locker.split(b':')
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except FileNotFoundError:
                pass

        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    return held


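# Illustrative usage note for `debugmanifestfulltextcache` below: with no
# options the cache contents are listed, `--clear` empties the cache, and
# `--add NODE` (repeatable) populates it from the given manifest nodes.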
@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _(
                b"Current revlog implementation doesn't appear to have a "
                b"manifest fulltext cache\n"
            )
            raise error.Abort(msg)

    if opts.get('clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
            return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(
                        bytes(e), hint=b"Check your manifest node id"
                    )
                manifest.read()  # stores revision in cache too
            return

    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )


@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
def debugmergestate(ui, repo, *args, **opts):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    if ui.verbose:
        ms = mergestatemod.mergestate(repo)

        # sort so that reasonable information is on top
        v1records = ms._readrecordsv1()
        v2records = ms._readrecordsv2()

        if not v1records and not v2records:
            pass
        elif not v2records:
            ui.writenoi18n(b'no version 2 merge state\n')
        elif ms._v1v2match(v1records, v2records):
            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
        else:
            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')

    opts = pycompat.byteskwargs(opts)
    if not opts[b'template']:
        opts[b'template'] = (
            b'{if(commits, "", "no merge state found\n")}'
            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
            b'{files % "file: {path} (state \\"{state}\\")\n'
            b'{if(local_path, "'
            b'  local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
            b'  ancestor path: {ancestor_path} (node {ancestor_node})\n'
            b'  other path: {other_path} (node {other_node})\n'
            b'")}'
            b'{if(rename_side, "'
            b'  rename side: {rename_side}\n'
            b'  renamed path: {renamed_path}\n'
            b'")}'
            b'{extras % "  extra: {key} = {value}\n"}'
            b'"}'
            b'{extras % "extra: {file} ({key} = {value})\n"}'
        )

    ms = mergestatemod.mergestate.read(repo)

    fm = ui.formatter(b'debugmergestate', opts)
    fm.startitem()

    fm_commits = fm.nested(b'commits')
    if ms.active():
        for name, node, label_index in (
            (b'local', ms.local, 0),
            (b'other', ms.other, 1),
        ):
            fm_commits.startitem()
            fm_commits.data(name=name)
            fm_commits.data(node=hex(node))
            if ms._labels and len(ms._labels) > label_index:
                fm_commits.data(label=ms._labels[label_index])
    fm_commits.end()

    fm_files = fm.nested(b'files')
    if ms.active():
        for f in ms:
            fm_files.startitem()
            fm_files.data(path=f)
            state = ms._state[f]
            fm_files.data(state=state[0])
            if state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED,
                mergestatemod.MERGE_RECORD_RESOLVED,
            ):
                fm_files.data(local_key=state[1])
                fm_files.data(local_path=state[2])
                fm_files.data(ancestor_path=state[3])
                fm_files.data(ancestor_node=state[4])
                fm_files.data(other_path=state[5])
                fm_files.data(other_node=state[6])
                fm_files.data(local_flags=state[7])
            elif state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
                mergestatemod.MERGE_RECORD_RESOLVED_PATH,
            ):
                fm_files.data(renamed_path=state[1])
                fm_files.data(rename_side=state[2])
            fm_extras = fm_files.nested(b'extras')
            for k, v in sorted(ms.extras(f).items()):
                fm_extras.startitem()
                fm_extras.data(key=k)
                fm_extras.data(value=v)
            fm_extras.end()

    fm_files.end()

    fm_extras = fm.nested(b'extras')
    for f, d in sorted(ms.allextras().items()):
        if f in ms:
            # If file is in mergestate, we have already processed its extras
            continue
        for k, v in d.items():
            fm_extras.startitem()
            fm_extras.data(file=f)
            fm_extras.data(key=k)
            fm_extras.data(value=v)
    fm_extras.end()

    fm.end()


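# Illustrative usage note for `debugnamecomplete` below: a prefix such as
# `hg debugnamecomplete d` prints every tag, bookmark, and open branch name
# starting with "d", one per line; with no argument, all names are listed.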
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in repo.names.items():
        if name != b'branches':
            names.update(ns.listnames(repo))
    names.update(
        tag
        for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
        if not closed
    )
    completions = set()
    if not args:
        args = [b'']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write(b'\n'.join(sorted(completions)))
    ui.write(b'\n')


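# Note on `debugnodemap` below: the persistent nodemap lives next to the
# changelog in the store as a docket plus a data file (presumably
# `00changelog.n` and a `00changelog-*.nd` companion); `--metadata` prints the
# docket fields (uid, tip-rev, tip-node, data-length, data-unused).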
@command(
    b'debugnodemap',
    [
        (
            b'',
            b'dump-new',
            False,
            _(b'write a (new) persistent binary nodemap on stdout'),
        ),
        (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
        (
            b'',
            b'check',
            False,
            _(b'check that the data on disk are correct.'),
        ),
        (
            b'',
            b'metadata',
            False,
            _(b'display the on disk meta data for the nodemap'),
        ),
    ],
)
def debugnodemap(ui, repo, **opts):
    """write and inspect on disk nodemap"""
    if opts['dump_new']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        if util.safehasattr(cl.index, "nodemap_data_all"):
            data = cl.index.nodemap_data_all()
        else:
            data = nodemap.persistent_data(cl.index)
        ui.write(data)
    elif opts['dump_disk']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write(data[:])
    elif opts['check']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            return nodemap.check_data(ui, cl.index, data)
    elif opts['metadata']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write((b"uid: %s\n") % docket.uid)
            ui.write((b"tip-rev: %d\n") % docket.tip_rev)
            ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
            ui.write((b"data-length: %d\n") % docket.data_length)
            ui.write((b"data-unused: %d\n") % docket.data_unused)
            unused_perc = docket.data_unused * 100.0 / docket.data_length
            ui.write((b"data-unused: %2.3f%%\n") % unused_perc)


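# Illustrative usage note for `debugobsolete` below (nodes are hypothetical):
# with no arguments the existing markers are listed; `hg debugobsolete
# <old-node> <new-node>` records that the first changeset was superseded by
# the second; `--index` displays marker indices, which `--delete` accepts.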
@command(
    b'debugobsolete',
    [
        (b'', b'flags', 0, _(b'markers flag')),
        (
            b'',
            b'record-parents',
            False,
            _(b'record parent information for the precursor'),
        ),
        (b'r', b'rev', [], _(b'display markers relevant to REV')),
        (
            b'',
            b'exclusive',
            False,
            _(b'restrict display to markers only relevant to REV'),
        ),
        (b'', b'index', False, _(b'display index of the marker')),
        (b'', b'delete', [], _(b'delete markers specified by indices')),
    ]
    + cmdutil.commitopts2
    + cmdutil.formatteropts,
    _(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != repo.nodeconstants.nodelen:
                raise ValueError
            return n
        except ValueError:
            raise error.InputError(
                b'changeset references must be full hexadecimal '
                b'node identifiers'
            )

    if opts.get(b'delete'):
        indices = []
        for v in opts.get(b'delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.InputError(
                    _(b'invalid index value: %r') % v,
                    hint=_(b'use integers for indices'),
                )

        if repo.currenttransaction():
            raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of a transaction.')
            )

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_(b'deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        if opts[b'rev']:
            raise error.InputError(
                b'cannot select revision when creating marker'
            )
        metadata = {}
        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction(b'debugobsolete')
            try:
                date = opts.get(b'date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts[b'record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort(
                            b'cannot use --record-parents on '
                            b'unknown changesets'
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts[b'flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % stringutil.forcebytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        if opts[b'rev']:
            revs = logcmdutil.revrange(repo, opts[b'rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts[b'exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get(b'rev') and opts.get(b'index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get(b'index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()


@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p1copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p2copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


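# Illustrative usage note for `debugpathcomplete` below: a prefix such as
# `hg debugpathcomplete sr` lists dirstate-tracked paths under the current
# directory starting with "sr", stopping at the next path segment unless
# --full is given.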
@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    """complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used."""

    def complete(path, acceptable):
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        spec = spec[len(rootdir) :]
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.items():
            if f.startswith(spec) and st.state in acceptable:
                if fixpaths:
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')


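# Illustrative usage note for `debugpathcopies` below: `hg debugpathcopies
# .^ .` prints one `source -> dest` line for each copy or rename recorded
# between the two revisions.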
@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    ctx1 = scmutil.revsingle(repo, rev1)
    ctx2 = scmutil.revsingle(repo, rev2)
    m = scmutil.match(ctx1, pats, opts)
    for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
        ui.write(b'%s -> %s\n' % (src, dst))

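# Hypothetical example for the command above; the file names are invented,
# but the "source -> destination" shape matches the ui.write() format string:
#
#   $ hg debugpathcopies 1 2
#   old-name.c -> new-name.c
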
@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging. Requires --debug to display
    # though.
    overrides = {
        (b'devel', b'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        try:
            local = peer.local() is not None
            canpush = peer.canpush()

            ui.write(_(b'url: %s\n') % peer.url())
            ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
            ui.write(
                _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
            )
        finally:
            peer.close()

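# Sketch of the output shape produced above (the URL is a placeholder; the
# yes/no answers depend on the peer type):
#
#   $ hg debugpeer ssh://user@example.com/repo
#   url: ssh://user@example.com/repo
#   local: no
#   pushable: yes
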
@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for the specified file

    As described in :hg:`help merge-tools`, Mercurial examines the
    configurations below in this order to decide which merge tool is
    chosen for the specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reasons only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out the examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actually updating to it.

    With --debug, this command also shows warning messages while matching
    against ``merge-patterns`` and so on. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases the amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows the configurations below
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If the merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such a case, the information
    above is useful for knowing why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts[b'tool']:
        overrides[(b'ui', b'forcemerge')] = opts[b'tool']
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        hgmerge = encoding.environ.get(b"HGMERGE")
        if hgmerge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config(b"ui", b"merge")
        if uimerge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts[b'changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            with ui.silent(
                error=True
            ) if not ui.debugflag else util.nullcontextmanager():
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            ui.write(b'%s = %s\n' % (path, tool))

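# A minimal usage sketch, assuming a repository containing file.txt; forcing
# a tool via --tool makes the answer deterministic, otherwise it depends on
# the local merge-tools configuration:
#
#   $ hg debugpickmergetool --tool :merge3 file.txt
#   file.txt = :merge3
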
@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    """access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    """

    target = hg.peer(ui, {}, repopath)
    try:
        if keyinfo:
            key, old, new = keyinfo
            with target.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': namespace,
                        b'key': key,
                        b'old': old,
                        b'new': new,
                    },
                ).result()

            ui.status(pycompat.bytestr(r) + b'\n')
            return not r
        else:
            for k, v in sorted(target.listkeys(namespace).items()):
                ui.write(
                    b"%s\t%s\n"
                    % (stringutil.escapestr(k), stringutil.escapestr(v))
                )
    finally:
        target.close()

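# Example usage (repository path and hash are placeholders). Listing the
# `bookmarks` namespace prints tab-separated key/value pairs, matching the
# ui.write() format above:
#
#   $ hg debugpushkey /path/to/repo bookmarks
#   main	1f0dee641bb7258c56bd60e93edfa2405381c41e
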
@command(b'debugpvec', [], _(b'A B'))
def debugpvec(ui, repo, a, b=None):
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = b"="
    elif pa > pb:
        rel = b">"
    elif pa < pb:
        rel = b"<"
    elif pa | pb:
        rel = b"|"
    ui.write(_(b"a: %s\n") % pa)
    ui.write(_(b"b: %s\n") % pb)
    ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(
        _(b"delta: %d hdist: %d distance: %d relation: %s\n")
        % (
            abs(pa._depth - pb._depth),
            pvec._hamming(pa._vec, pb._vec),
            pa.distance(pb),
            rel,
        )
    )

@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look for the given revision

    If no revision is specified, the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        if repo.currenttransaction() is not None:
            msg = b'rebuild the dirstate outside of a transaction'
            raise error.ProgrammingError(msg)
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = {f for f in dsonly if not dirstate.get_entry(f).added}
            changedfiles = manifestonly | dsnotadded

        with dirstate.changing_parents(repo):
            dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)

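# Example invocations (the command prints nothing on success): a full
# rebuild from the working copy parent, and the cheaper --minimal variant
# described in the docstring:
#
#   $ hg debugrebuilddirstate
#   $ hg debugrebuilddirstate --minimal
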
@command(
    b'debugrebuildfncache',
    [
        (
            b'',
            b'only-data',
            False,
            _(b'only look for wrong .d files (much faster)'),
        )
    ],
    b'',
)
def debugrebuildfncache(ui, repo, **opts):
    """rebuild the fncache file"""
    opts = pycompat.byteskwargs(opts)
    repair.rebuildfncache(ui, repo, opts.get(b"only_data"))

@command(
    b'debugrename',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    m = scmutil.match(ctx, pats, opts)
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        o = fctx.filelog().renamed(fctx.filenode())
        rel = repo.pathto(abs)
        if o:
            ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
        else:
            ui.write(_(b"%s not renamed\n") % rel)

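# Hypothetical session showing both branches of the loop above (the hash is
# a placeholder filelog node):
#
#   $ hg debugrename copied.txt plain.txt
#   copied.txt renamed from original.txt:37d9b5d994eab34eda9c16b195ace52c7b129980
#   plain.txt not renamed
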
@command(b'debugrequires|debugrequirements', [], b'')
def debugrequirements(ui, repo):
    """print the current repo requirements"""
    for r in sorted(repo.requirements):
        ui.write(b"%s\n" % r)

@command(
    b'debugrevlog',
    cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)

    if opts.get(b"dump"):
        revlog_debug.dump(ui, r)
    else:
        revlog_debug.debug_revlog(ui, r)
    return 0

@command(
    b'debugrevlogindex',
    cmdutil.debugrevlogopts
    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
    _(b'[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
    format = opts.get(b'format', 0)
    if format not in (0, 1):
        raise error.Abort(_(b"unknown format %d") % format)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        if ui.verbose:
            ui.writenoi18n(
                b"   rev    offset  length linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
        else:
            ui.writenoi18n(
                b"   rev linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
    elif format == 1:
        if ui.verbose:
            ui.writenoi18n(
                (
                    b"   rev flag   offset   length     size   link     p1"
                    b"     p2 %s\n"
                )
                % b"nodeid".rjust(idlen)
            )
        else:
            ui.writenoi18n(
                b"   rev flag     size   link     p1     p2 %s\n"
                % b"nodeid".rjust(idlen)
            )

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [repo.nullid, repo.nullid]
            if ui.verbose:
                ui.write(
                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
                    % (
                        i,
                        r.start(i),
                        r.length(i),
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
            else:
                ui.write(
                    b"% 6d % 7d %s %s %s\n"
                    % (
                        i,
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write(
                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.start(i),
                        r.length(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
            else:
                ui.write(
                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )

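# Sketch of the default (format 0, non-verbose) output for a one-revision
# changelog; the node columns are placeholders, truncated to 12 hex digits
# by `short`:
#
#   $ hg debugrevlogindex -c
#      rev linkrev nodeid       p1           p2
#        0       0 1f0dee641bb7 000000000000 000000000000
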
@command(
    b'debugrevspec',
    [
        (
            b'',
            b'optimize',
            None,
            _(b'print parsed tree after optimizing (DEPRECATED)'),
        ),
        (
            b'',
            b'show-revs',
            True,
            _(b'print list of result revisions (default)'),
        ),
        (
            b's',
            b'show-set',
            None,
            _(b'print internal representation of result set'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
        (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
        (b'', b'verify-optimized', False, _(b'verify optimized result')),
    ],
    b'REVSPEC',
)
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use the -p/--show-stage option to print the parsed tree at the given
    stages. Use -p all to print the tree at every stage.

    Use the --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems(b'revsetalias')
    stages = [
        (b'parsed', lambda tree: tree),
        (
            b'expanded',
            lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
        ),
        (b'concatenated', revsetlang.foldconcat),
        (b'analyzed', revsetlang.analyze),
        (b'optimized', revsetlang.optimize),
    ]
    if opts[b'no_optimized']:
        stages = stages[:-1]
    if opts[b'verify_optimized'] and opts[b'no_optimized']:
        raise error.Abort(
            _(b'cannot use --verify-optimized with --no-optimized')
        )
    stagenames = {n for n, f in stages}

    showalways = set()
    showchanged = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
        showchanged.update([b'expanded', b'concatenated'])
        if opts[b'optimize']:
            showalways.add(b'optimized')
    if opts[b'show_stage'] and opts[b'optimize']:
        raise error.Abort(_(b'cannot use --optimize with --show-stage'))
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(revsetlang.prettyformat(tree), b"\n")
            printedtree = tree

    if opts[b'verify_optimized']:
        arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
        brevs = revset.makematcher(treebystage[b'optimized'])(repo)
        if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
            ui.writenoi18n(
                b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
            )
            ui.writenoi18n(
                b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
            )
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
        ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write(b'-%d\n' % c, label=b'diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write(b'+%d\n' % c, label=b'diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(b' %d\n' % c)
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
        ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
    if not opts[b'show_revs']:
        return
    for c in revs:
        ui.write(b"%d\n" % c)

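# Example of inspecting a single parser stage without evaluating the set;
# the exact tree rendering comes from revsetlang.prettyformat and may vary
# between versions:
#
#   $ hg debugrevspec --no-show-revs -p parsed 'heads(all())'
#   (func
#     (symbol 'heads')
#     (func
#       (symbol 'all')
#       None))
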
@command(
    b'debugserve',
    [
        (
            b'',
            b'sshstdio',
            False,
            _(b'run an SSH server bound to process handles'),
        ),
        (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
        (b'', b'logiofile', b'', _(b'file to log server I/O to')),
    ],
    b'',
)
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts[b'sshstdio']:
        raise error.Abort(_(b'only --sshstdio is currently supported'))

    logfh = None

    if opts[b'logiofd'] and opts[b'logiofile']:
        raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))

    if opts[b'logiofd']:
        # Ideally we would be line buffered. But line buffering in binary
        # mode isn't supported and emits a warning in Python 3.8+. Disabling
        # buffering could have performance impacts. But since this isn't
        # performance critical code, it should be fine.
        try:
            logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
    elif opts[b'logiofile']:
        logfh = open(opts[b'logiofile'], b'ab', 0)

    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()

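# Example: run the SSH wire protocol over stdio while logging all I/O to a
# file (both flags are defined in the command table above):
#
#   $ hg debugserve --sshstdio --logiofile /tmp/hg-server-io.log
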
@command(b'debugsetparents', [], _(b'REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory (DANGEROUS)

    This command is not what you are looking for and should not be used. Using
    this command will most certainly result in slight corruption of the file
    level histories within your repository. DO NOT USE THIS COMMAND.

    The command updates the p1 and p2 fields in the dirstate, and does not
    touch anything else. This is useful for writing repository conversion
    tools, but should be used with extreme care. For example, neither the
    working directory nor the dirstate is updated, so file status may be
    incorrect after running this command. Only use it if you are one of the
    few people that deeply understand both conversion tools and file level
    histories. If you are reading this help, you are not one of those people
    (most of them sailed west from Mithlond anyway).

    So one last time DO NOT USE THIS COMMAND.

    Returns 0 on success.
    """

    node1 = scmutil.revsingle(repo, rev1).node()
    node2 = scmutil.revsingle(repo, rev2, b'null').node()

    with repo.wlock():
        repo.setparents(node1, node2)

@command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugsidedata(ui, repo, file_, rev=None, **opts):
    """dump the side data for a cl/manifest/file revision

    Use --verbose to dump the sidedata content."""
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    r = getattr(r, '_revlog', r)
    try:
        sidedata = r.sidedata(r.lookup(rev))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
    if sidedata:
        sidedata = list(sidedata.items())
        sidedata.sort()
        ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
        for key, value in sidedata:
            ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
            if ui.verbose:
                ui.writenoi18n(b'  %s\n' % stringutil.pprint(value))

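# Hypothetical output for a changelog revision carrying two sidedata
# entries (keys are printed in octal, sizes in bytes, per the format above):
#
#   $ hg debugsidedata -c 0
#   2 sidedata entries
#    entry-0001 size 4
#    entry-0002 size 32
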
@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    """test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    """
    if not pycompat.iswindows:
        raise error.Abort(
            _(b'certificate chain building is only possible on Windows')
        )

    if not source:
        if not repo:
            raise error.Abort(
                _(
                    b"there is no Mercurial repository here, and no "
                    b"server specified"
                )
            )
        source = b"default"

    path = urlutil.get_unique_pull_path_obj(b'debugssl', ui, source)
    url = path.url

    defaultport = {b'https': 443, b'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_(b"malformed port number in URL"))
    else:
        raise error.Abort(_(b"only https and ssh connections are supported"))

    from . import win32

    s = ssl.wrap_socket(
        socket.socket(),
        ssl_version=ssl.PROTOCOL_TLS,
        cert_reqs=ssl.CERT_NONE,
        ca_certs=None,
    )

    try:
        s.connect(addr)
        cert = s.getpeercert(True)

        ui.status(_(b'checking the certificate chain for %s\n') % url.host)

        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_(b'certificate chain is incomplete, updating... '))

            if not win32.checkcertificatechain(cert):
                ui.status(_(b'failed.\n'))
            else:
                ui.status(_(b'done.\n'))
        else:
            ui.status(_(b'full certificate chain is available\n'))
    finally:
        s.close()

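# Example invocation (Windows only, per the platform guard above; the URL
# is a placeholder and the second line assumes a complete chain):
#
#   $ hg debugssl https://example.com/repo
#   checking the certificate chain for example.com
#   full certificate chain is available
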
@command(
    b"debugbackupbundle",
    [
        (
            b"",
            b"recover",
            b"",
            b"brings the specified changeset back into the repository",
        )
    ]
    + cmdutil.logopts,
    _(b"hg debugbackupbundle [--recover HASH]"),
)
def debugbackupbundle(ui, repo, *pats, **opts):
    """lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in each
    backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for that
    backup.
    """
    backups = list(
        filter(
            os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
        )
    )
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    opts = pycompat.byteskwargs(opts)
    opts[b"bundle"] = b""
    opts[b"force"] = None
    limit = logcmdutil.getlimit(opts)

    def display(other, chlist, displayer):
        if opts.get(b"newest_first"):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                True for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b"no_merges") and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get(b"recover")
    if recovernode:
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_(b"%s already exists in the repo\n") % recovernode)
            return
    elif backups:
        msg = _(
            b"Recover changesets using: hg debugbackupbundle --recover "
            b"<changeset hash>\n\nAvailable backup changesets:"
        )
        ui.status(msg, label=b"status.removed")
    else:
        ui.status(_(b"no backup changesets found\n"))
        return

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = os.path.relpath(backup, encoding.getcwd())
        path = urlutil.get_unique_pull_path_obj(
            b'debugbackupbundle',
            ui,
            source,
        )
        try:
            other = hg.peer(repo, opts, path)
        except error.LookupError as ex:
            msg = _(b"\nwarning: unable to open bundle %s") % path.loc
            hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg, hint=hint)
            continue
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = hg.addbranchrevs(
            repo, other, branches, opts.get(b"rev")
        )

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        with ui.silent():
            try:
                other, chlist, cleanupfn = bundlerepo.getremotechanges(
                    ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
                )
            except error.LookupError:
                continue

        try:
            if not chlist:
                continue
            if recovernode:
                with repo.lock(), repo.transaction(b"unbundle") as tr:
                    if scmutil.isrevsymbol(other, recovernode):
                        ui.status(_(b"Unbundling %s\n") % (recovernode))
                        f = hg.openpath(ui, path.loc)
                        gen = exchange.readbundle(ui, f, path.loc)
                        if isinstance(gen, bundle2.unbundle20):
                            bundle2.applybundle(
                                repo,
                                gen,
                                tr,
                                source=b"unbundle",
                                url=b"bundle:" + path.loc,
                            )
                        else:
                            gen.apply(repo, b"unbundle", b"bundle:" + path.loc)
                        break
            else:
                backupdate = encoding.strtolocal(
                    time.strftime(
                        "%a %H:%M, %Y-%m-%d",
                        time.localtime(os.path.getmtime(path.loc)),
                    )
                )
                ui.status(b"\n%s\n" % (backupdate.ljust(50)))
                if ui.verbose:
                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), path.loc))
                else:
                    opts[
                        b"template"
                    ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
                displayer = logcmdutil.changesetdisplayer(
                    ui, other, opts, False
                )
                display(other, chlist, displayer)
                displayer.close()
        finally:
            cleanupfn()

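# Hypothetical recovery session; the hash must name a changeset stored in
# one of the .hg/strip-backup bundles scanned above:
#
#   $ hg debugbackupbundle --recover 1f0dee641bb7
#   Unbundling 1f0dee641bb7
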
@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    ctx = scmutil.revsingle(repo, rev, None)
    for k, v in sorted(ctx.substate.items()):
        ui.writenoi18n(b'path %s\n' % k)
        ui.writenoi18n(b' source   %s\n' % v[0])
        ui.writenoi18n(b' revision %s\n' % v[1])

@command(
    b'debugshell',
    [
        (
            b'c',
            b'command',
            b'',
            _(b'program passed in as a string'),
            _(b'COMMAND'),
        )
    ],
    _(b'[-c COMMAND]'),
    optionalrepo=True,
)
def debugshell(ui, repo, **opts):
    """run an interactive Python interpreter

    The local namespace is provided with a reference to the ui and
    the repo instance (if available).
    """
    import code

    imported_objects = {
        'ui': ui,
        'repo': repo,
    }

    # py2exe disables initialization of the site module, which is responsible
    # for arranging for ``quit()`` to exit the interpreter. Manually initialize
    # the stuff that site normally does here, so that the interpreter can be
    # quit in a consistent manner, whether run with pyoxidizer, exewrapper.c,
    # py.exe, or py2exe.
    if getattr(sys, "frozen", None) == 'console_exe':
        try:
            import site

            site.setcopyright()
            site.sethelper()
            site.setquit()
        except ImportError:
            site = None  # Keep PyCharm happy

    command = opts.get('command')
    if command:
        compiled = code.compile_command(encoding.strfromlocal(command))
        code.InteractiveInterpreter(locals=imported_objects).runcode(compiled)
        return

    code.interact(local=imported_objects)

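# Example one-liner evaluated in the namespace prepared above (`ui` and
# `repo` are pre-bound; the count shown is repository dependent):
#
#   $ hg debugshell -c 'ui.write(b"%d\n" % len(repo))'
#   1234
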
3851 @command(
3851 @command(
3852 b'debug-revlog-stats',
3852 b'debug-revlog-stats',
3853 [
3853 [
3854 (b'c', b'changelog', None, _(b'Display changelog statistics')),
3854 (b'c', b'changelog', None, _(b'Display changelog statistics')),
3855 (b'm', b'manifest', None, _(b'Display manifest statistics')),
3855 (b'm', b'manifest', None, _(b'Display manifest statistics')),
3856 (b'f', b'filelogs', None, _(b'Display filelogs statistics')),
3856 (b'f', b'filelogs', None, _(b'Display filelogs statistics')),
3857 ]
3857 ]
3858 + cmdutil.formatteropts,
3858 + cmdutil.formatteropts,
3859 )
3859 )
3860 def debug_revlog_stats(ui, repo, **opts):
3860 def debug_revlog_stats(ui, repo, **opts):
3861 """display statistics about revlogs in the store"""
3861 """display statistics about revlogs in the store"""
3862 opts = pycompat.byteskwargs(opts)
3862 opts = pycompat.byteskwargs(opts)
3863 changelog = opts[b"changelog"]
3863 changelog = opts[b"changelog"]
3864 manifest = opts[b"manifest"]
3864 manifest = opts[b"manifest"]
3865 filelogs = opts[b"filelogs"]
3865 filelogs = opts[b"filelogs"]
3866
3866
3867 if changelog is None and manifest is None and filelogs is None:
3867 if changelog is None and manifest is None and filelogs is None:
3868 changelog = True
3868 changelog = True
3869 manifest = True
3869 manifest = True
3870 filelogs = True
3870 filelogs = True
3871
3871
3872 repo = repo.unfiltered()
3872 repo = repo.unfiltered()
3873 fm = ui.formatter(b'debug-revlog-stats', opts)
3873 fm = ui.formatter(b'debug-revlog-stats', opts)
3874 revlog_debug.debug_revlog_stats(repo, fm, changelog, manifest, filelogs)
3874 revlog_debug.debug_revlog_stats(repo, fm, changelog, manifest, filelogs)
3875 fm.end()
3875 fm.end()
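
# Example (illustrative): restrict the report to the manifest and render it
# as JSON via the formatter support pulled in by ``cmdutil.formatteropts``:
#
#   $ hg debug-revlog-stats --manifest -T json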


@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only, unless the closest
    successors sets are requested with ``--closest``.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors is called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    ctx2str = bytes
    node2str = short
    for rev in logcmdutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % ctx2str(ctx))
        for succsset in obsutil.successorssets(
            repo, ctx.node(), closest=opts['closest'], cache=cache
        ):
            if succsset:
                ui.write(b'    ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(b' ')
                    ui.write(node2str(node))
            ui.write(b'\n')


@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    flog = repo.file(b'.hgtags')
    for r in repo:
        node = repo[r].node()
        tagsnode = cache.getfnode(node, computemissing=False)
        if tagsnode:
            tagsnodedisplay = hex(tagsnode)
            if not flog.hasnode(tagsnode):
                tagsnodedisplay += b' (unknown node)'
        elif tagsnode is None:
            tagsnodedisplay = b'missing'
        else:
            tagsnodedisplay = b'invalid'

        ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))
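
# Note: each line emitted above has the form "<rev> <node> <fnode>", where
# <fnode> may carry an "(unknown node)" annotation or be replaced by
# "missing"/"invalid", mirroring the branches in the loop.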


@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = logcmdutil.revrange(repo, opts['rev'])

    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split(b'=', 1))
            if not k or k == b'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_(b'malformed keyword definition: %s') % d)

    if ui.verbose:
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
            )

    if revs is None:
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
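
# Example (illustrative): apply a log template to the working directory
# parent, defining an ad-hoc keyword with -D:
#
#   $ hg debugtemplate -r . -D greeting=hello '{greeting}: {node|short}\n'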


@command(
    b'debuguigetpass',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    if r is None:
        r = b"<default response>"
    ui.writenoi18n(b'response: %s\n' % r)


@command(
    b'debuguiprompt',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    r = ui.prompt(prompt)
    ui.writenoi18n(b'response: %s\n' % r)


@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    with repo.wlock(), repo.lock():
        repo.updatecaches(caches=repository.CACHES_ALL)


@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
        (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines,
    this should complete almost instantaneously and the chances of a consumer
    being unable to access the repository should be low.

    By default, all revlogs will be upgraded. You can restrict this using
    flags such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlogs but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    * `--filelogs`: optimize the filelogs only
    * `--no-changelog --no-manifest --no-filelogs`: skip all revlog
      optimizations
    """
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=set(optimize), backup=backup, **opts
    )
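
# Example (illustrative): preview the upgrade plan first, then run it on
# filelogs only and skip the backup copy:
#
#   $ hg debugupgraderepo
#   $ hg debugupgraderepo --run --no-backup --no-changelog --no-manifest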


@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    f = lambda fn: fn
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        f = lambda fn: util.normpath(fn)
    fmt = b'f  %%-%ds  %%-%ds  %%s' % (
        max([len(abs) for abs in items]),
        max([len(repo.pathto(abs)) for abs in items]),
    )
    for abs in items:
        line = fmt % (
            abs,
            f(repo.pathto(abs)),
            m.exact(abs) and b'exact' or b'',
        )
        ui.write(b"%s\n" % line.rstrip())
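
# Example (illustrative): show how a glob pattern resolves against the
# working directory; --verbose also prints the matcher itself:
#
#   $ hg debugwalk --verbose 'glob:**.py'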


@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        dnodes = b''
        if entry.get(b'divergentnodes'):
            dnodes = (
                b' '.join(
                    b'%s (%s)' % (ctx.hex(), ctx.phasestr())
                    for ctx in entry[b'divergentnodes']
                )
                + b' '
            )
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )


@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    try:
        for opt in cmdutil.remoteopts:
            del opts[opt[1]]
        args = {}
        for k, v in opts.items():
            if v:
                args[k] = v
        args = pycompat.strkwargs(args)
        # run twice to check that we don't mess up the stream for the next command
        res1 = repo.debugwireargs(*vals, **args)
        res2 = repo.debugwireargs(*vals, **args)
        ui.write(b"%s\n" % res1)
        if res1 != res2:
            ui.warn(b"%s\n" % res2)
    finally:
        repo.close()


def _parsewirelangblocks(fh):
    activeaction = None
    blocklines = []
    lastindent = 0

    for line in fh:
        line = line.rstrip()
        if not line:
            continue

        if line.startswith(b'#'):
            continue

        if not line.startswith(b' '):
            # New block. Flush previous one.
            if activeaction:
                yield activeaction, blocklines

            activeaction = line
            blocklines = []
            lastindent = 0
            continue

        # Else we start with an indent.

        if not activeaction:
            raise error.Abort(_(b'indented line outside of block'))

        indent = len(line) - len(line.lstrip())

        # If this line is indented more than the last line, concatenate it.
        if indent > lastindent and blocklines:
            blocklines[-1] += line.lstrip()
        else:
            blocklines.append(line)
            lastindent = indent

    # Flush last block.
    if activeaction:
        yield activeaction, blocklines
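
# Example (illustrative): given the two-line input
#
#   command listkeys
#     namespace bookmarks
#
# _parsewirelangblocks() yields a single block,
# (b'command listkeys', [b'  namespace bookmarks']); argument lines keep
# their indentation for the consumer to strip.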


@command(
    b'debugwireproto',
    [
        (b'', b'localssh', False, _(b'start an SSH server for this repo')),
        (b'', b'peer', b'', _(b'construct a specific version of the peer')),
        (
            b'',
            b'noreadstderr',
            False,
            _(b'do not read from stderr of the remote'),
        ),
        (
            b'',
            b'nologhandshake',
            False,
            _(b'do not log I/O related to the peer handshake'),
        ),
    ]
    + cmdutil.remoteopts,
    _(b'[PATH]'),
    optionalrepo=True,
)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``ssh1``. ``raw`` instances only allow sending raw data payloads and
    don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is specified via stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.

    raw+
    ----

    Behaves like ``raw`` except that it flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

        command listkeys
            namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` ``<X>`` bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` ``<X>`` bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit *Unified Frame-Based Protocol* frames by using
    special syntax.

    A frame is composed of a type, flags, and payload. These can be parsed
    from a string of the form::

        <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
    """
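    # Example (illustrative) frame string in the form documented above:
    # request-id 1, stream-id 1, stream flag "stream-begin", frame type
    # "command-request", frame flag "new", and a CBOR-encoded payload:
    #
    #   frame 1 1 stream-begin command-request new cbor:{b'name': b'heads'}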
    opts = pycompat.byteskwargs(opts)

    if opts[b'localssh'] and not repo:
        raise error.Abort(_(b'--localssh requires a repository'))

    if opts[b'peer'] and opts[b'peer'] not in (
        b'raw',
        b'ssh1',
    ):
        raise error.Abort(
            _(b'invalid value for --peer'),
            hint=_(b'valid values are "raw" and "ssh1"'),
        )

    if path and opts[b'localssh']:
        raise error.Abort(_(b'cannot specify --localssh with an explicit path'))

    if ui.interactive():
        ui.write(_(b'(waiting for commands on stdin)\n'))

    blocks = list(_parsewirelangblocks(ui.fin))

    proc = None
    stdin = None
    stdout = None
    stderr = None
    opener = None

    if opts[b'localssh']:
        # We start the SSH server in its own process so there is process
        # separation. This prevents a whole class of potential bugs around
        # shared state from interfering with server operation.
        args = procutil.hgcmd() + [
            b'-R',
            repo.root,
            b'debugserve',
            b'--sshstdio',
        ]
        proc = subprocess.Popen(
            pycompat.rapply(procutil.tonativestr, args),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            bufsize=0,
        )

        stdin = proc.stdin
        stdout = proc.stdout
        stderr = proc.stderr

        # We turn the pipes into observers so we can log I/O.
        if ui.verbose or opts[b'peer'] == b'raw':
            stdin = util.makeloggingfileobject(
                ui, proc.stdin, b'i', logdata=True
            )
            stdout = util.makeloggingfileobject(
                ui, proc.stdout, b'o', logdata=True
            )
            stderr = util.makeloggingfileobject(
                ui, proc.stderr, b'e', logdata=True
            )

        # --localssh also implies the peer connection settings.

        url = b'ssh://localserver'
        autoreadstderr = not opts[b'noreadstderr']

        if opts[b'peer'] == b'ssh1':
            ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
            peer = sshpeer.sshv1peer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                None,
                autoreadstderr=autoreadstderr,
            )
        elif opts[b'peer'] == b'raw':
            ui.write(_(b'using raw connection to peer\n'))
            peer = None
        else:
            ui.write(_(b'creating ssh peer from handshake results\n'))
            peer = sshpeer.makepeer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                autoreadstderr=autoreadstderr,
            )

    elif path:
        # We bypass hg.peer() so we can proxy the sockets.
        # TODO consider not doing this because we skip
        # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
        u = urlutil.url(path)
        if u.scheme != b'http':
            raise error.Abort(_(b'only http:// paths are currently supported'))

        url, authinfo = u.authinfo()
        openerargs = {
            'useragent': b'Mercurial debugwireproto',
        }

        # Turn pipes/sockets into observers so we can log I/O.
        if ui.verbose:
            openerargs.update(
                {
                    'loggingfh': ui,
                    'loggingname': b's',
                    'loggingopts': {
                        'logdata': True,
                        'logdataapis': False,
                    },
                }
            )

        if ui.debugflag:
            openerargs['loggingopts']['logdataapis'] = True

        # Don't send default headers when in raw mode. This allows us to
        # bypass most of the behavior of our URL handling code so we can
        # have near complete control over what's sent on the wire.
        if opts[b'peer'] == b'raw':
            openerargs['sendaccept'] = False

        opener = urlmod.opener(ui, authinfo, **openerargs)

        if opts[b'peer'] == b'raw':
            ui.write(_(b'using raw connection to peer\n'))
            peer = None
        elif opts[b'peer']:
            raise error.Abort(
                _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
            )
        else:
            peer_path = urlutil.try_path(ui, path)
            peer = httppeer.makepeer(ui, peer_path, opener=opener)

        # We /could/ populate stdin/stdout with sock.makefile()...
    else:
        raise error.Abort(_(b'unsupported connection configuration'))

    batchedcommands = None

    # Now perform actions based on the parsed wire language instructions.
    for action, lines in blocks:
        if action in (b'raw', b'raw+'):
            if not stdin:
                raise error.Abort(_(b'cannot call raw/raw+ on this peer'))

            # Concatenate the data together.
            data = b''.join(l.lstrip() for l in lines)
            data = stringutil.unescapestr(data)
            stdin.write(data)

            if action == b'raw+':
                stdin.flush()
        elif action == b'flush':
            if not stdin:
                raise error.Abort(_(b'cannot call flush on this peer'))
            stdin.flush()
        elif action.startswith(b'command'):
            if not peer:
                raise error.Abort(
                    _(
                        b'cannot send commands unless peer instance '
                        b'is available'
                    )
                )

            command = action.split(b' ', 1)[1]

            args = {}
            for line in lines:
                # We need to allow empty values.
                fields = line.lstrip().split(b' ', 1)
                if len(fields) == 1:
                    key = fields[0]
                    value = b''
                else:
                    key, value = fields

                if value.startswith(b'eval:'):
                    value = stringutil.evalpythonliteral(value[5:])
                else:
                    value = stringutil.unescapestr(value)

                args[key] = value

            if batchedcommands is not None:
                batchedcommands.append((command, args))
                continue

            ui.status(_(b'sending %s command\n') % command)

            if b'PUSHFILE' in args:
                with open(args[b'PUSHFILE'], 'rb') as fh:
                    del args[b'PUSHFILE']
                    res, output = peer._callpush(
                        command, fh, **pycompat.strkwargs(args)
                    )
                    ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
                    ui.status(
                        _(b'remote output: %s\n') % stringutil.escapestr(output)
                    )
            else:
                with peer.commandexecutor() as e:
                    res = e.callcommand(command, args).result()

                ui.status(
                    _(b'response: %s\n')
                    % stringutil.pprint(res, bprefix=True, indent=2)
                )

        elif action == b'batchbegin':
            if batchedcommands is not None:
                raise error.Abort(_(b'nested batchbegin not allowed'))

            batchedcommands = []
        elif action == b'batchsubmit':
            # There is a batching API we could go through. But it would be
            # difficult to normalize requests into function calls. It is easier
            # to bypass this layer and normalize to commands + args.
            ui.status(
                _(b'sending batch with %d sub-commands\n')
                % len(batchedcommands)
            )
            assert peer is not None
            for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                ui.status(
                    _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
                )

            batchedcommands = None

        elif action.startswith(b'httprequest '):
            if not opener:
                raise error.Abort(
                    _(b'cannot use httprequest without an HTTP peer')
                )

            request = action.split(b' ', 2)
            if len(request) != 3:
                raise error.Abort(
                    _(
                        b'invalid httprequest: expected format is '
                        b'"httprequest <method> <path>"'
                    )
                )

            method, httppath = request[1:]
            headers = {}
            body = None
            frames = []
            for line in lines:
                line = line.lstrip()
                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                if m:
                    # Headers need to use native strings.
                    key = pycompat.strurl(m.group(1))
                    value = pycompat.strurl(m.group(2))
                    headers[key] = value
                    continue

                if line.startswith(b'BODYFILE '):
                    # The file name is everything after "BODYFILE ".
                    with open(line.split(b' ', 1)[1], b'rb') as fh:
                        body = fh.read()
                elif line.startswith(b'frame '):
                    frame = wireprotoframing.makeframefromhumanstring(
                        line[len(b'frame ') :]
                    )

                    frames.append(frame)
                else:
                    raise error.Abort(
                        _(b'unknown argument to httprequest: %s') % line
                    )

            url = path + httppath

            if frames:
                body = b''.join(bytes(f) for f in frames)

            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)

            # urllib.Request insists on using has_data() as a proxy for
            # determining the request method. Override that to use our
            # explicitly requested method.
            req.get_method = lambda: pycompat.sysstr(method)

            try:
                res = opener.open(req)
                body = res.read()
            except util.urlerr.urlerror as e:
                # read() method must be called, but only exists in Python 2
                getattr(e, 'read', lambda: None)()
                continue

            ct = res.headers.get('Content-Type')
            if ct == 'application/mercurial-cbor':
                ui.write(
                    _(b'cbor> %s\n')
                    % stringutil.pprint(
                        cborutil.decodeall(body), bprefix=True, indent=2
                    )
                )

        elif action == b'close':
            assert peer is not None
            peer.close()
        elif action == b'readavailable':
            if not stdout or not stderr:
                raise error.Abort(
                    _(b'readavailable not available on this peer')
                )

            stdin.close()
            stdout.read()
            stderr.read()

        elif action == b'readline':
            if not stdout:
                raise error.Abort(_(b'readline not available on this peer'))
            stdout.readline()
        elif action == b'ereadline':
            if not stderr:
                raise error.Abort(_(b'ereadline not available on this peer'))
            stderr.readline()
        elif action.startswith(b'read '):
            count = int(action.split(b' ', 1)[1])
            if not stdout:
                raise error.Abort(_(b'read not available on this peer'))
            stdout.read(count)
        elif action.startswith(b'eread '):
            count = int(action.split(b' ', 1)[1])
            if not stderr:
                raise error.Abort(_(b'eread not available on this peer'))
            stderr.read(count)
        else:
            raise error.Abort(_(b'unknown action: %s') % action)

    if batchedcommands is not None:
        raise error.Abort(_(b'unclosed "batchbegin" request'))

    if peer:
        peer.close()

    if proc:
        proc.kill()
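

# Example (illustrative) stdin script for ``hg debugwireproto --localssh``,
# combining actions documented in the docstring above:
#
#   command heads
#   batchbegin
#   command listkeys
#       namespace bookmarks
#   batchsubmit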
@@ -1,3320 +1,3327 b''
# util.py - Mercurial utility functions and platform specific implementations
#
#  Copyright 2005 K. Thananchayan <thananck@yahoo.com>
#  Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#  Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""


import abc
import collections
import contextlib
import errno
import gc
import hashlib
import io
import itertools
import locale
import mmap
import os
import pickle  # provides util.pickle symbol
import re as remod
import shutil
import stat
import sys
import time
import traceback
import warnings

from .node import hex
from .thirdparty import attr
from .pycompat import (
    delattr,
    getattr,
    open,
    setattr,
)
from hgdemandimport import tracing
from . import (
    encoding,
    error,
    i18n,
    policy,
    pycompat,
    urllibcompat,
)
from .utils import (
    compression,
    hashutil,
    procutil,
    stringutil,
)

if pycompat.TYPE_CHECKING:
    from typing import (
        Iterable,
        Iterator,
        List,
        Optional,
        Tuple,
    )


base85 = policy.importmod('base85')
osutil = policy.importmod('osutil')

b85decode = base85.b85decode
b85encode = base85.b85encode

cookielib = pycompat.cookielib
httplib = pycompat.httplib
safehasattr = pycompat.safehasattr
socketserver = pycompat.socketserver
bytesio = io.BytesIO
# TODO deprecate stringio name, as it is a lie on Python 3.
stringio = bytesio
xmlrpclib = pycompat.xmlrpclib

httpserver = urllibcompat.httpserver
urlerr = urllibcompat.urlerr
urlreq = urllibcompat.urlreq

# workaround for win32mbcs
_filenamebytestr = pycompat.bytestr

if pycompat.iswindows:
    from . import windows as platform
else:
    from . import posix as platform

_ = i18n._

abspath = platform.abspath
101 bindunixsocket = platform.bindunixsocket
101 bindunixsocket = platform.bindunixsocket
102 cachestat = platform.cachestat
102 cachestat = platform.cachestat
103 checkexec = platform.checkexec
103 checkexec = platform.checkexec
104 checklink = platform.checklink
104 checklink = platform.checklink
105 copymode = platform.copymode
105 copymode = platform.copymode
106 expandglobs = platform.expandglobs
106 expandglobs = platform.expandglobs
107 getfsmountpoint = platform.getfsmountpoint
107 getfsmountpoint = platform.getfsmountpoint
108 getfstype = platform.getfstype
108 getfstype = platform.getfstype
109 get_password = platform.get_password
109 get_password = platform.get_password
110 groupmembers = platform.groupmembers
110 groupmembers = platform.groupmembers
111 groupname = platform.groupname
111 groupname = platform.groupname
112 isexec = platform.isexec
112 isexec = platform.isexec
113 isowner = platform.isowner
113 isowner = platform.isowner
114 listdir = osutil.listdir
114 listdir = osutil.listdir
115 localpath = platform.localpath
115 localpath = platform.localpath
116 lookupreg = platform.lookupreg
116 lookupreg = platform.lookupreg
117 makedir = platform.makedir
117 makedir = platform.makedir
118 nlinks = platform.nlinks
118 nlinks = platform.nlinks
119 normpath = platform.normpath
119 normpath = platform.normpath
120 normcase = platform.normcase
120 normcase = platform.normcase
121 normcasespec = platform.normcasespec
121 normcasespec = platform.normcasespec
122 normcasefallback = platform.normcasefallback
122 normcasefallback = platform.normcasefallback
123 openhardlinks = platform.openhardlinks
123 openhardlinks = platform.openhardlinks
124 oslink = platform.oslink
124 oslink = platform.oslink
125 parsepatchoutput = platform.parsepatchoutput
125 parsepatchoutput = platform.parsepatchoutput
126 pconvert = platform.pconvert
126 pconvert = platform.pconvert
127 poll = platform.poll
127 poll = platform.poll
128 posixfile = platform.posixfile
128 posixfile = platform.posixfile
129 readlink = platform.readlink
129 readlink = platform.readlink
130 rename = platform.rename
130 rename = platform.rename
131 removedirs = platform.removedirs
131 removedirs = platform.removedirs
132 samedevice = platform.samedevice
132 samedevice = platform.samedevice
133 samefile = platform.samefile
133 samefile = platform.samefile
134 samestat = platform.samestat
134 samestat = platform.samestat
135 setflags = platform.setflags
135 setflags = platform.setflags
136 split = platform.split
136 split = platform.split
137 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
137 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
138 statisexec = platform.statisexec
138 statisexec = platform.statisexec
139 statislink = platform.statislink
139 statislink = platform.statislink
140 umask = platform.umask
140 umask = platform.umask
141 unlink = platform.unlink
141 unlink = platform.unlink
142 username = platform.username
142 username = platform.username
143
143
144
144
145 def setumask(val):
145 def setumask(val):
146 # type: (int) -> None
146 # type: (int) -> None
147 '''updates the umask. used by chg server'''
147 '''updates the umask. used by chg server'''
148 if pycompat.iswindows:
148 if pycompat.iswindows:
149 return
149 return
150 os.umask(val)
150 os.umask(val)
151 global umask
151 global umask
152 platform.umask = umask = val & 0o777
152 platform.umask = umask = val & 0o777
153
153
154
154
155 # small compat layer
155 # small compat layer
156 compengines = compression.compengines
156 compengines = compression.compengines
157 SERVERROLE = compression.SERVERROLE
157 SERVERROLE = compression.SERVERROLE
158 CLIENTROLE = compression.CLIENTROLE
158 CLIENTROLE = compression.CLIENTROLE
159
159
160 # Python compatibility
160 # Python compatibility
161
161
162 _notset = object()
162 _notset = object()
163
163
164
164
165 def bitsfrom(container):
165 def bitsfrom(container):
166 bits = 0
166 bits = 0
167 for bit in container:
167 for bit in container:
168 bits |= bit
168 bits |= bit
169 return bits
169 return bits
170
170
171
171
172 # python 2.6 still has deprecation warnings enabled by default. We do not want
172 # python 2.6 still has deprecation warnings enabled by default. We do not want
173 # to display anything to the standard user, so detect if we are running tests
173 # to display anything to the standard user, so detect if we are running tests
174 # and only use python deprecation warnings in this case.
174 # and only use python deprecation warnings in this case.
175 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
175 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
176 if _dowarn:
176 if _dowarn:
177 # explicitly unfilter our warning for python 2.7
177 # explicitly unfilter our warning for python 2.7
178 #
178 #
179 # The option of setting PYTHONWARNINGS in the test runner was investigated.
179 # The option of setting PYTHONWARNINGS in the test runner was investigated.
180 # However, module name set through PYTHONWARNINGS was exactly matched, so
180 # However, module name set through PYTHONWARNINGS was exactly matched, so
181 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
181 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
182 # makes the whole PYTHONWARNINGS thing useless for our usecase.
182 # makes the whole PYTHONWARNINGS thing useless for our usecase.
183 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
183 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
184 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
184 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
185 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
185 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
186 if _dowarn:
186 if _dowarn:
187 # silence warning emitted by passing user string to re.sub()
187 # silence warning emitted by passing user string to re.sub()
188 warnings.filterwarnings(
188 warnings.filterwarnings(
189 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
189 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
190 )
190 )
191 warnings.filterwarnings(
191 warnings.filterwarnings(
192 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
192 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
193 )
193 )
194 # TODO: reinvent imp.is_frozen()
194 # TODO: reinvent imp.is_frozen()
195 warnings.filterwarnings(
195 warnings.filterwarnings(
196 'ignore',
196 'ignore',
197 'the imp module is deprecated',
197 'the imp module is deprecated',
198 DeprecationWarning,
198 DeprecationWarning,
199 'mercurial',
199 'mercurial',
200 )
200 )
201
201
202
202
203 def nouideprecwarn(msg, version, stacklevel=1):
203 def nouideprecwarn(msg, version, stacklevel=1):
204 """Issue an python native deprecation warning
204 """Issue an python native deprecation warning
205
205
206 This is a no-op outside of tests; use 'ui.deprecwarn' when possible.
206 This is a no-op outside of tests; use 'ui.deprecwarn' when possible.
207 """
207 """
208 if _dowarn:
208 if _dowarn:
209 msg += (
209 msg += (
210 b"\n(compatibility will be dropped after Mercurial-%s,"
210 b"\n(compatibility will be dropped after Mercurial-%s,"
211 b" update your code.)"
211 b" update your code.)"
212 ) % version
212 ) % version
213 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
213 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
214 # on python 3 with chg, we will need to explicitly flush the output
214 # on python 3 with chg, we will need to explicitly flush the output
215 sys.stderr.flush()
215 sys.stderr.flush()
216
216
217
217
218 DIGESTS = {
218 DIGESTS = {
219 b'md5': hashlib.md5,
219 b'md5': hashlib.md5,
220 b'sha1': hashutil.sha1,
220 b'sha1': hashutil.sha1,
221 b'sha512': hashlib.sha512,
221 b'sha512': hashlib.sha512,
222 }
222 }
223 # List of digest types from strongest to weakest
223 # List of digest types from strongest to weakest
224 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
224 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
225
225
226 for k in DIGESTS_BY_STRENGTH:
226 for k in DIGESTS_BY_STRENGTH:
227 assert k in DIGESTS
227 assert k in DIGESTS
228
228
229
229
230 class digester:
230 class digester:
231 """helper to compute digests.
231 """helper to compute digests.
232
232
233 This helper can be used to compute one or more digests given their name.
233 This helper can be used to compute one or more digests given their name.
234
234
235 >>> d = digester([b'md5', b'sha1'])
235 >>> d = digester([b'md5', b'sha1'])
236 >>> d.update(b'foo')
236 >>> d.update(b'foo')
237 >>> [k for k in sorted(d)]
237 >>> [k for k in sorted(d)]
238 ['md5', 'sha1']
238 ['md5', 'sha1']
239 >>> d[b'md5']
239 >>> d[b'md5']
240 'acbd18db4cc2f85cedef654fccc4a4d8'
240 'acbd18db4cc2f85cedef654fccc4a4d8'
241 >>> d[b'sha1']
241 >>> d[b'sha1']
242 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
242 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
243 >>> digester.preferred([b'md5', b'sha1'])
243 >>> digester.preferred([b'md5', b'sha1'])
244 'sha1'
244 'sha1'
245 """
245 """
246
246
247 def __init__(self, digests, s=b''):
247 def __init__(self, digests, s=b''):
248 self._hashes = {}
248 self._hashes = {}
249 for k in digests:
249 for k in digests:
250 if k not in DIGESTS:
250 if k not in DIGESTS:
251 raise error.Abort(_(b'unknown digest type: %s') % k)
251 raise error.Abort(_(b'unknown digest type: %s') % k)
252 self._hashes[k] = DIGESTS[k]()
252 self._hashes[k] = DIGESTS[k]()
253 if s:
253 if s:
254 self.update(s)
254 self.update(s)
255
255
256 def update(self, data):
256 def update(self, data):
257 for h in self._hashes.values():
257 for h in self._hashes.values():
258 h.update(data)
258 h.update(data)
259
259
260 def __getitem__(self, key):
260 def __getitem__(self, key):
261 if key not in DIGESTS:
261 if key not in DIGESTS:
262 raise error.Abort(_(b'unknown digest type: %s') % key)
262 raise error.Abort(_(b'unknown digest type: %s') % key)
263 return hex(self._hashes[key].digest())
263 return hex(self._hashes[key].digest())
264
264
265 def __iter__(self):
265 def __iter__(self):
266 return iter(self._hashes)
266 return iter(self._hashes)
267
267
268 @staticmethod
268 @staticmethod
269 def preferred(supported):
269 def preferred(supported):
270 """returns the strongest digest type in both supported and DIGESTS."""
270 """returns the strongest digest type in both supported and DIGESTS."""
271
271
272 for k in DIGESTS_BY_STRENGTH:
272 for k in DIGESTS_BY_STRENGTH:
273 if k in supported:
273 if k in supported:
274 return k
274 return k
275 return None
275 return None
276
276
277
277
278 class digestchecker:
278 class digestchecker:
279 """file handle wrapper that additionally checks content against a given
279 """file handle wrapper that additionally checks content against a given
280 size and digests.
280 size and digests.
281
281
282 d = digestchecker(fh, size, {'md5': '...'})
282 d = digestchecker(fh, size, {'md5': '...'})
283
283
284 When multiple digests are given, all of them are validated.
284 When multiple digests are given, all of them are validated.
285 """
285 """
286
286
287 def __init__(self, fh, size, digests):
287 def __init__(self, fh, size, digests):
288 self._fh = fh
288 self._fh = fh
289 self._size = size
289 self._size = size
290 self._got = 0
290 self._got = 0
291 self._digests = dict(digests)
291 self._digests = dict(digests)
292 self._digester = digester(self._digests.keys())
292 self._digester = digester(self._digests.keys())
293
293
294 def read(self, length=-1):
294 def read(self, length=-1):
295 content = self._fh.read(length)
295 content = self._fh.read(length)
296 self._digester.update(content)
296 self._digester.update(content)
297 self._got += len(content)
297 self._got += len(content)
298 return content
298 return content
299
299
300 def validate(self):
300 def validate(self):
301 if self._size != self._got:
301 if self._size != self._got:
302 raise error.Abort(
302 raise error.Abort(
303 _(b'size mismatch: expected %d, got %d')
303 _(b'size mismatch: expected %d, got %d')
304 % (self._size, self._got)
304 % (self._size, self._got)
305 )
305 )
306 for k, v in self._digests.items():
306 for k, v in self._digests.items():
307 if v != self._digester[k]:
307 if v != self._digester[k]:
308 # i18n: first parameter is a digest name
308 # i18n: first parameter is a digest name
309 raise error.Abort(
309 raise error.Abort(
310 _(b'%s mismatch: expected %s, got %s')
310 _(b'%s mismatch: expected %s, got %s')
311 % (k, v, self._digester[k])
311 % (k, v, self._digester[k])
312 )
312 )
313
313
314
314
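(Illustrative sketch, not part of the diff: one way the digestchecker wrapper
above might be exercised. The import path is an assumption; the md5 value for
b'foo' matches the digester doctest earlier in this file.)

    import io
    from mercurial import util

    fh = util.digestchecker(
        io.BytesIO(b'foo'), 3, {b'md5': b'acbd18db4cc2f85cedef654fccc4a4d8'}
    )
    while fh.read(2):  # drain the wrapped stream through the checker
        pass
    fh.validate()  # raises error.Abort on a size or digest mismatch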
315 try:
315 try:
316 buffer = buffer # pytype: disable=name-error
316 buffer = buffer # pytype: disable=name-error
317 except NameError:
317 except NameError:
318
318
319 def buffer(sliceable, offset=0, length=None):
319 def buffer(sliceable, offset=0, length=None):
320 if length is not None:
320 if length is not None:
321 return memoryview(sliceable)[offset : offset + length]
321 return memoryview(sliceable)[offset : offset + length]
322 return memoryview(sliceable)[offset:]
322 return memoryview(sliceable)[offset:]
323
323
324
324
325 _chunksize = 4096
325 _chunksize = 4096
326
326
327
327
328 class bufferedinputpipe:
328 class bufferedinputpipe:
329 """a manually buffered input pipe
329 """a manually buffered input pipe
330
330
331 Python will not let us use buffered IO and lazy reading with 'polling' at
331 Python will not let us use buffered IO and lazy reading with 'polling' at
332 the same time. We cannot probe the buffer state and select will not detect
332 the same time. We cannot probe the buffer state and select will not detect
333 that data are ready to read if they are already buffered.
333 that data are ready to read if they are already buffered.
334
334
335 This class lets us work around that by implementing its own buffering
335 This class lets us work around that by implementing its own buffering
336 (allowing efficient readline) while offering a way to know if the buffer is
336 (allowing efficient readline) while offering a way to know if the buffer is
337 empty from the output (allowing collaboration of the buffer with polling).
337 empty from the output (allowing collaboration of the buffer with polling).
338
338
339 This class lives in the 'util' module because it makes use of the 'os'
339 This class lives in the 'util' module because it makes use of the 'os'
340 module from the python stdlib.
340 module from the python stdlib.
341 """
341 """
342
342
343 def __new__(cls, fh):
343 def __new__(cls, fh):
344 # If we receive a fileobjectproxy, we need to use a variation of this
344 # If we receive a fileobjectproxy, we need to use a variation of this
345 # class that notifies observers about activity.
345 # class that notifies observers about activity.
346 if isinstance(fh, fileobjectproxy):
346 if isinstance(fh, fileobjectproxy):
347 cls = observedbufferedinputpipe
347 cls = observedbufferedinputpipe
348
348
349 return super(bufferedinputpipe, cls).__new__(cls)
349 return super(bufferedinputpipe, cls).__new__(cls)
350
350
351 def __init__(self, input):
351 def __init__(self, input):
352 self._input = input
352 self._input = input
353 self._buffer = []
353 self._buffer = []
354 self._eof = False
354 self._eof = False
355 self._lenbuf = 0
355 self._lenbuf = 0
356
356
357 @property
357 @property
358 def hasbuffer(self):
358 def hasbuffer(self):
359 """True is any data is currently buffered
359 """True is any data is currently buffered
360
360
361 This will be used externally as a pre-step for polling IO. If there is
361 This will be used externally as a pre-step for polling IO. If there is
362 already data then no polling should be set in place."""
362 already data then no polling should be set in place."""
363 return bool(self._buffer)
363 return bool(self._buffer)
364
364
365 @property
365 @property
366 def closed(self):
366 def closed(self):
367 return self._input.closed
367 return self._input.closed
368
368
369 def fileno(self):
369 def fileno(self):
370 return self._input.fileno()
370 return self._input.fileno()
371
371
372 def close(self):
372 def close(self):
373 return self._input.close()
373 return self._input.close()
374
374
375 def read(self, size):
375 def read(self, size):
376 while (not self._eof) and (self._lenbuf < size):
376 while (not self._eof) and (self._lenbuf < size):
377 self._fillbuffer()
377 self._fillbuffer()
378 return self._frombuffer(size)
378 return self._frombuffer(size)
379
379
380 def unbufferedread(self, size):
380 def unbufferedread(self, size):
381 if not self._eof and self._lenbuf == 0:
381 if not self._eof and self._lenbuf == 0:
382 self._fillbuffer(max(size, _chunksize))
382 self._fillbuffer(max(size, _chunksize))
383 return self._frombuffer(min(self._lenbuf, size))
383 return self._frombuffer(min(self._lenbuf, size))
384
384
385 def readline(self, *args, **kwargs):
385 def readline(self, *args, **kwargs):
386 if len(self._buffer) > 1:
386 if len(self._buffer) > 1:
387 # this should not happen because both read and readline end with a
387 # this should not happen because both read and readline end with a
388 # _frombuffer call that collapses it.
388 # _frombuffer call that collapses it.
389 self._buffer = [b''.join(self._buffer)]
389 self._buffer = [b''.join(self._buffer)]
390 self._lenbuf = len(self._buffer[0])
390 self._lenbuf = len(self._buffer[0])
391 lfi = -1
391 lfi = -1
392 if self._buffer:
392 if self._buffer:
393 lfi = self._buffer[-1].find(b'\n')
393 lfi = self._buffer[-1].find(b'\n')
394 while (not self._eof) and lfi < 0:
394 while (not self._eof) and lfi < 0:
395 self._fillbuffer()
395 self._fillbuffer()
396 if self._buffer:
396 if self._buffer:
397 lfi = self._buffer[-1].find(b'\n')
397 lfi = self._buffer[-1].find(b'\n')
398 size = lfi + 1
398 size = lfi + 1
399 if lfi < 0: # end of file
399 if lfi < 0: # end of file
400 size = self._lenbuf
400 size = self._lenbuf
401 elif len(self._buffer) > 1:
401 elif len(self._buffer) > 1:
402 # we need to take previous chunks into account
402 # we need to take previous chunks into account
403 size += self._lenbuf - len(self._buffer[-1])
403 size += self._lenbuf - len(self._buffer[-1])
404 return self._frombuffer(size)
404 return self._frombuffer(size)
405
405
406 def _frombuffer(self, size):
406 def _frombuffer(self, size):
407 """return at most 'size' data from the buffer
407 """return at most 'size' data from the buffer
408
408
409 The data are removed from the buffer."""
409 The data are removed from the buffer."""
410 if size == 0 or not self._buffer:
410 if size == 0 or not self._buffer:
411 return b''
411 return b''
412 buf = self._buffer[0]
412 buf = self._buffer[0]
413 if len(self._buffer) > 1:
413 if len(self._buffer) > 1:
414 buf = b''.join(self._buffer)
414 buf = b''.join(self._buffer)
415
415
416 data = buf[:size]
416 data = buf[:size]
417 buf = buf[len(data) :]
417 buf = buf[len(data) :]
418 if buf:
418 if buf:
419 self._buffer = [buf]
419 self._buffer = [buf]
420 self._lenbuf = len(buf)
420 self._lenbuf = len(buf)
421 else:
421 else:
422 self._buffer = []
422 self._buffer = []
423 self._lenbuf = 0
423 self._lenbuf = 0
424 return data
424 return data
425
425
426 def _fillbuffer(self, size=_chunksize):
426 def _fillbuffer(self, size=_chunksize):
427 """read data to the buffer"""
427 """read data to the buffer"""
428 data = os.read(self._input.fileno(), size)
428 data = os.read(self._input.fileno(), size)
429 if not data:
429 if not data:
430 self._eof = True
430 self._eof = True
431 else:
431 else:
432 self._lenbuf += len(data)
432 self._lenbuf += len(data)
433 self._buffer.append(data)
433 self._buffer.append(data)
434
434
435 return data
435 return data
436
436
437
437
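(Illustrative sketch, not part of the diff: bufferedinputpipe paired with an
OS pipe, showing how the hasbuffer property signals that polling can be
skipped. The import path is an assumption.)

    import os
    from mercurial import util

    rfd, wfd = os.pipe()
    os.write(wfd, b'first line\nsecond line\n')
    os.close(wfd)

    pipe = util.bufferedinputpipe(os.fdopen(rfd, 'rb'))
    assert not pipe.hasbuffer           # nothing buffered yet: poll first
    assert pipe.readline() == b'first line\n'
    assert pipe.hasbuffer               # leftover bytes buffered: skip polling
    assert pipe.read(11) == b'second line'
    pipe.close()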
438 def mmapread(fp, size=None):
438 def mmapread(fp, size=None):
439 if size == 0:
439 if size == 0:
440 # size of 0 to mmap.mmap() means "all data"
440 # size of 0 to mmap.mmap() means "all data"
441 # rather than "zero bytes", so special case that.
441 # rather than "zero bytes", so special case that.
442 return b''
442 return b''
443 elif size is None:
443 elif size is None:
444 size = 0
444 size = 0
445 fd = getattr(fp, 'fileno', lambda: fp)()
445 fd = getattr(fp, 'fileno', lambda: fp)()
446 try:
446 try:
447 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
447 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
448 except ValueError:
448 except ValueError:
449 # Empty files cannot be mmapped, but mmapread should still work. Check
449 # Empty files cannot be mmapped, but mmapread should still work. Check
450 # if the file is empty, and if so, return an empty buffer.
450 # if the file is empty, and if so, return an empty buffer.
451 if os.fstat(fd).st_size == 0:
451 if os.fstat(fd).st_size == 0:
452 return b''
452 return b''
453 raise
453 raise
454
454
455
455
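(Illustrative sketch, not part of the diff: mmapread on a regular file and on
an empty file, which takes the ValueError fallback above.)

    import tempfile
    from mercurial import util

    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(b'some bytes to map')
        tmp.flush()
        data = util.mmapread(tmp)         # read-only mmap of the whole file
        assert data[:4] == b'some'

    with tempfile.NamedTemporaryFile() as tmp:
        assert util.mmapread(tmp) == b''  # empty files cannot be mmapped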
456 class fileobjectproxy:
456 class fileobjectproxy:
457 """A proxy around file objects that tells a watcher when events occur.
457 """A proxy around file objects that tells a watcher when events occur.
458
458
459 This type is intended to only be used for testing purposes. Think hard
459 This type is intended to only be used for testing purposes. Think hard
460 before using it in important code.
460 before using it in important code.
461 """
461 """
462
462
463 __slots__ = (
463 __slots__ = (
464 '_orig',
464 '_orig',
465 '_observer',
465 '_observer',
466 )
466 )
467
467
468 def __init__(self, fh, observer):
468 def __init__(self, fh, observer):
469 object.__setattr__(self, '_orig', fh)
469 object.__setattr__(self, '_orig', fh)
470 object.__setattr__(self, '_observer', observer)
470 object.__setattr__(self, '_observer', observer)
471
471
472 def __getattribute__(self, name):
472 def __getattribute__(self, name):
473 ours = {
473 ours = {
474 '_observer',
474 '_observer',
475 # IOBase
475 # IOBase
476 'close',
476 'close',
477 # closed if a property
477 # closed if a property
478 'fileno',
478 'fileno',
479 'flush',
479 'flush',
480 'isatty',
480 'isatty',
481 'readable',
481 'readable',
482 'readline',
482 'readline',
483 'readlines',
483 'readlines',
484 'seek',
484 'seek',
485 'seekable',
485 'seekable',
486 'tell',
486 'tell',
487 'truncate',
487 'truncate',
488 'writable',
488 'writable',
489 'writelines',
489 'writelines',
490 # RawIOBase
490 # RawIOBase
491 'read',
491 'read',
492 'readall',
492 'readall',
493 'readinto',
493 'readinto',
494 'write',
494 'write',
495 # BufferedIOBase
495 # BufferedIOBase
496 # raw is a property
496 # raw is a property
497 'detach',
497 'detach',
498 # read defined above
498 # read defined above
499 'read1',
499 'read1',
500 # readinto defined above
500 # readinto defined above
501 # write defined above
501 # write defined above
502 }
502 }
503
503
504 # We only observe some methods.
504 # We only observe some methods.
505 if name in ours:
505 if name in ours:
506 return object.__getattribute__(self, name)
506 return object.__getattribute__(self, name)
507
507
508 return getattr(object.__getattribute__(self, '_orig'), name)
508 return getattr(object.__getattribute__(self, '_orig'), name)
509
509
510 def __nonzero__(self):
510 def __nonzero__(self):
511 return bool(object.__getattribute__(self, '_orig'))
511 return bool(object.__getattribute__(self, '_orig'))
512
512
513 __bool__ = __nonzero__
513 __bool__ = __nonzero__
514
514
515 def __delattr__(self, name):
515 def __delattr__(self, name):
516 return delattr(object.__getattribute__(self, '_orig'), name)
516 return delattr(object.__getattribute__(self, '_orig'), name)
517
517
518 def __setattr__(self, name, value):
518 def __setattr__(self, name, value):
519 return setattr(object.__getattribute__(self, '_orig'), name, value)
519 return setattr(object.__getattribute__(self, '_orig'), name, value)
520
520
521 def __iter__(self):
521 def __iter__(self):
522 return object.__getattribute__(self, '_orig').__iter__()
522 return object.__getattribute__(self, '_orig').__iter__()
523
523
524 def _observedcall(self, name, *args, **kwargs):
524 def _observedcall(self, name, *args, **kwargs):
525 # Call the original object.
525 # Call the original object.
526 orig = object.__getattribute__(self, '_orig')
526 orig = object.__getattribute__(self, '_orig')
527 res = getattr(orig, name)(*args, **kwargs)
527 res = getattr(orig, name)(*args, **kwargs)
528
528
529 # Call a method on the observer of the same name with arguments
529 # Call a method on the observer of the same name with arguments
530 # so it can react, log, etc.
530 # so it can react, log, etc.
531 observer = object.__getattribute__(self, '_observer')
531 observer = object.__getattribute__(self, '_observer')
532 fn = getattr(observer, name, None)
532 fn = getattr(observer, name, None)
533 if fn:
533 if fn:
534 fn(res, *args, **kwargs)
534 fn(res, *args, **kwargs)
535
535
536 return res
536 return res
537
537
538 def close(self, *args, **kwargs):
538 def close(self, *args, **kwargs):
539 return object.__getattribute__(self, '_observedcall')(
539 return object.__getattribute__(self, '_observedcall')(
540 'close', *args, **kwargs
540 'close', *args, **kwargs
541 )
541 )
542
542
543 def fileno(self, *args, **kwargs):
543 def fileno(self, *args, **kwargs):
544 return object.__getattribute__(self, '_observedcall')(
544 return object.__getattribute__(self, '_observedcall')(
545 'fileno', *args, **kwargs
545 'fileno', *args, **kwargs
546 )
546 )
547
547
548 def flush(self, *args, **kwargs):
548 def flush(self, *args, **kwargs):
549 return object.__getattribute__(self, '_observedcall')(
549 return object.__getattribute__(self, '_observedcall')(
550 'flush', *args, **kwargs
550 'flush', *args, **kwargs
551 )
551 )
552
552
553 def isatty(self, *args, **kwargs):
553 def isatty(self, *args, **kwargs):
554 return object.__getattribute__(self, '_observedcall')(
554 return object.__getattribute__(self, '_observedcall')(
555 'isatty', *args, **kwargs
555 'isatty', *args, **kwargs
556 )
556 )
557
557
558 def readable(self, *args, **kwargs):
558 def readable(self, *args, **kwargs):
559 return object.__getattribute__(self, '_observedcall')(
559 return object.__getattribute__(self, '_observedcall')(
560 'readable', *args, **kwargs
560 'readable', *args, **kwargs
561 )
561 )
562
562
563 def readline(self, *args, **kwargs):
563 def readline(self, *args, **kwargs):
564 return object.__getattribute__(self, '_observedcall')(
564 return object.__getattribute__(self, '_observedcall')(
565 'readline', *args, **kwargs
565 'readline', *args, **kwargs
566 )
566 )
567
567
568 def readlines(self, *args, **kwargs):
568 def readlines(self, *args, **kwargs):
569 return object.__getattribute__(self, '_observedcall')(
569 return object.__getattribute__(self, '_observedcall')(
570 'readlines', *args, **kwargs
570 'readlines', *args, **kwargs
571 )
571 )
572
572
573 def seek(self, *args, **kwargs):
573 def seek(self, *args, **kwargs):
574 return object.__getattribute__(self, '_observedcall')(
574 return object.__getattribute__(self, '_observedcall')(
575 'seek', *args, **kwargs
575 'seek', *args, **kwargs
576 )
576 )
577
577
578 def seekable(self, *args, **kwargs):
578 def seekable(self, *args, **kwargs):
579 return object.__getattribute__(self, '_observedcall')(
579 return object.__getattribute__(self, '_observedcall')(
580 'seekable', *args, **kwargs
580 'seekable', *args, **kwargs
581 )
581 )
582
582
583 def tell(self, *args, **kwargs):
583 def tell(self, *args, **kwargs):
584 return object.__getattribute__(self, '_observedcall')(
584 return object.__getattribute__(self, '_observedcall')(
585 'tell', *args, **kwargs
585 'tell', *args, **kwargs
586 )
586 )
587
587
588 def truncate(self, *args, **kwargs):
588 def truncate(self, *args, **kwargs):
589 return object.__getattribute__(self, '_observedcall')(
589 return object.__getattribute__(self, '_observedcall')(
590 'truncate', *args, **kwargs
590 'truncate', *args, **kwargs
591 )
591 )
592
592
593 def writable(self, *args, **kwargs):
593 def writable(self, *args, **kwargs):
594 return object.__getattribute__(self, '_observedcall')(
594 return object.__getattribute__(self, '_observedcall')(
595 'writable', *args, **kwargs
595 'writable', *args, **kwargs
596 )
596 )
597
597
598 def writelines(self, *args, **kwargs):
598 def writelines(self, *args, **kwargs):
599 return object.__getattribute__(self, '_observedcall')(
599 return object.__getattribute__(self, '_observedcall')(
600 'writelines', *args, **kwargs
600 'writelines', *args, **kwargs
601 )
601 )
602
602
603 def read(self, *args, **kwargs):
603 def read(self, *args, **kwargs):
604 return object.__getattribute__(self, '_observedcall')(
604 return object.__getattribute__(self, '_observedcall')(
605 'read', *args, **kwargs
605 'read', *args, **kwargs
606 )
606 )
607
607
608 def readall(self, *args, **kwargs):
608 def readall(self, *args, **kwargs):
609 return object.__getattribute__(self, '_observedcall')(
609 return object.__getattribute__(self, '_observedcall')(
610 'readall', *args, **kwargs
610 'readall', *args, **kwargs
611 )
611 )
612
612
613 def readinto(self, *args, **kwargs):
613 def readinto(self, *args, **kwargs):
614 return object.__getattribute__(self, '_observedcall')(
614 return object.__getattribute__(self, '_observedcall')(
615 'readinto', *args, **kwargs
615 'readinto', *args, **kwargs
616 )
616 )
617
617
618 def write(self, *args, **kwargs):
618 def write(self, *args, **kwargs):
619 return object.__getattribute__(self, '_observedcall')(
619 return object.__getattribute__(self, '_observedcall')(
620 'write', *args, **kwargs
620 'write', *args, **kwargs
621 )
621 )
622
622
623 def detach(self, *args, **kwargs):
623 def detach(self, *args, **kwargs):
624 return object.__getattribute__(self, '_observedcall')(
624 return object.__getattribute__(self, '_observedcall')(
625 'detach', *args, **kwargs
625 'detach', *args, **kwargs
626 )
626 )
627
627
628 def read1(self, *args, **kwargs):
628 def read1(self, *args, **kwargs):
629 return object.__getattribute__(self, '_observedcall')(
629 return object.__getattribute__(self, '_observedcall')(
630 'read1', *args, **kwargs
630 'read1', *args, **kwargs
631 )
631 )
632
632
633
633
634 class observedbufferedinputpipe(bufferedinputpipe):
634 class observedbufferedinputpipe(bufferedinputpipe):
635 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
635 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
636
636
637 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
637 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
638 bypass ``fileobjectproxy``. Because of this, we need to make
638 bypass ``fileobjectproxy``. Because of this, we need to make
639 ``bufferedinputpipe`` aware of these operations.
639 ``bufferedinputpipe`` aware of these operations.
640
640
641 This variation of ``bufferedinputpipe`` can notify observers about
641 This variation of ``bufferedinputpipe`` can notify observers about
642 ``os.read()`` events. It also re-publishes other events, such as
642 ``os.read()`` events. It also re-publishes other events, such as
643 ``read()`` and ``readline()``.
643 ``read()`` and ``readline()``.
644 """
644 """
645
645
646 def _fillbuffer(self, size=_chunksize):
646 def _fillbuffer(self, size=_chunksize):
647 res = super(observedbufferedinputpipe, self)._fillbuffer(size=size)
647 res = super(observedbufferedinputpipe, self)._fillbuffer(size=size)
648
648
649 fn = getattr(self._input._observer, 'osread', None)
649 fn = getattr(self._input._observer, 'osread', None)
650 if fn:
650 if fn:
651 fn(res, size)
651 fn(res, size)
652
652
653 return res
653 return res
654
654
655 # We use different observer methods because the operation isn't
655 # We use different observer methods because the operation isn't
656 # performed on the actual file object but on us.
656 # performed on the actual file object but on us.
657 def read(self, size):
657 def read(self, size):
658 res = super(observedbufferedinputpipe, self).read(size)
658 res = super(observedbufferedinputpipe, self).read(size)
659
659
660 fn = getattr(self._input._observer, 'bufferedread', None)
660 fn = getattr(self._input._observer, 'bufferedread', None)
661 if fn:
661 if fn:
662 fn(res, size)
662 fn(res, size)
663
663
664 return res
664 return res
665
665
666 def readline(self, *args, **kwargs):
666 def readline(self, *args, **kwargs):
667 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
667 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
668
668
669 fn = getattr(self._input._observer, 'bufferedreadline', None)
669 fn = getattr(self._input._observer, 'bufferedreadline', None)
670 if fn:
670 if fn:
671 fn(res)
671 fn(res)
672
672
673 return res
673 return res
674
674
675
675
676 PROXIED_SOCKET_METHODS = {
676 PROXIED_SOCKET_METHODS = {
677 'makefile',
677 'makefile',
678 'recv',
678 'recv',
679 'recvfrom',
679 'recvfrom',
680 'recvfrom_into',
680 'recvfrom_into',
681 'recv_into',
681 'recv_into',
682 'send',
682 'send',
683 'sendall',
683 'sendall',
684 'sendto',
684 'sendto',
685 'setblocking',
685 'setblocking',
686 'settimeout',
686 'settimeout',
687 'gettimeout',
687 'gettimeout',
688 'setsockopt',
688 'setsockopt',
689 }
689 }
690
690
691
691
692 class socketproxy:
692 class socketproxy:
693 """A proxy around a socket that tells a watcher when events occur.
693 """A proxy around a socket that tells a watcher when events occur.
694
694
695 This is like ``fileobjectproxy`` except for sockets.
695 This is like ``fileobjectproxy`` except for sockets.
696
696
697 This type is intended to only be used for testing purposes. Think hard
697 This type is intended to only be used for testing purposes. Think hard
698 before using it in important code.
698 before using it in important code.
699 """
699 """
700
700
701 __slots__ = (
701 __slots__ = (
702 '_orig',
702 '_orig',
703 '_observer',
703 '_observer',
704 )
704 )
705
705
706 def __init__(self, sock, observer):
706 def __init__(self, sock, observer):
707 object.__setattr__(self, '_orig', sock)
707 object.__setattr__(self, '_orig', sock)
708 object.__setattr__(self, '_observer', observer)
708 object.__setattr__(self, '_observer', observer)
709
709
710 def __getattribute__(self, name):
710 def __getattribute__(self, name):
711 if name in PROXIED_SOCKET_METHODS:
711 if name in PROXIED_SOCKET_METHODS:
712 return object.__getattribute__(self, name)
712 return object.__getattribute__(self, name)
713
713
714 return getattr(object.__getattribute__(self, '_orig'), name)
714 return getattr(object.__getattribute__(self, '_orig'), name)
715
715
716 def __delattr__(self, name):
716 def __delattr__(self, name):
717 return delattr(object.__getattribute__(self, '_orig'), name)
717 return delattr(object.__getattribute__(self, '_orig'), name)
718
718
719 def __setattr__(self, name, value):
719 def __setattr__(self, name, value):
720 return setattr(object.__getattribute__(self, '_orig'), name, value)
720 return setattr(object.__getattribute__(self, '_orig'), name, value)
721
721
722 def __nonzero__(self):
722 def __nonzero__(self):
723 return bool(object.__getattribute__(self, '_orig'))
723 return bool(object.__getattribute__(self, '_orig'))
724
724
725 __bool__ = __nonzero__
725 __bool__ = __nonzero__
726
726
727 def _observedcall(self, name, *args, **kwargs):
727 def _observedcall(self, name, *args, **kwargs):
728 # Call the original object.
728 # Call the original object.
729 orig = object.__getattribute__(self, '_orig')
729 orig = object.__getattribute__(self, '_orig')
730 res = getattr(orig, name)(*args, **kwargs)
730 res = getattr(orig, name)(*args, **kwargs)
731
731
732 # Call a method on the observer of the same name with arguments
732 # Call a method on the observer of the same name with arguments
733 # so it can react, log, etc.
733 # so it can react, log, etc.
734 observer = object.__getattribute__(self, '_observer')
734 observer = object.__getattribute__(self, '_observer')
735 fn = getattr(observer, name, None)
735 fn = getattr(observer, name, None)
736 if fn:
736 if fn:
737 fn(res, *args, **kwargs)
737 fn(res, *args, **kwargs)
738
738
739 return res
739 return res
740
740
741 def makefile(self, *args, **kwargs):
741 def makefile(self, *args, **kwargs):
742 res = object.__getattribute__(self, '_observedcall')(
742 res = object.__getattribute__(self, '_observedcall')(
743 'makefile', *args, **kwargs
743 'makefile', *args, **kwargs
744 )
744 )
745
745
746 # The file object may be used for I/O. So we turn it into a
746 # The file object may be used for I/O. So we turn it into a
747 # proxy using our observer.
747 # proxy using our observer.
748 observer = object.__getattribute__(self, '_observer')
748 observer = object.__getattribute__(self, '_observer')
749 return makeloggingfileobject(
749 return makeloggingfileobject(
750 observer.fh,
750 observer.fh,
751 res,
751 res,
752 observer.name,
752 observer.name,
753 reads=observer.reads,
753 reads=observer.reads,
754 writes=observer.writes,
754 writes=observer.writes,
755 logdata=observer.logdata,
755 logdata=observer.logdata,
756 logdataapis=observer.logdataapis,
756 logdataapis=observer.logdataapis,
757 )
757 )
758
758
759 def recv(self, *args, **kwargs):
759 def recv(self, *args, **kwargs):
760 return object.__getattribute__(self, '_observedcall')(
760 return object.__getattribute__(self, '_observedcall')(
761 'recv', *args, **kwargs
761 'recv', *args, **kwargs
762 )
762 )
763
763
764 def recvfrom(self, *args, **kwargs):
764 def recvfrom(self, *args, **kwargs):
765 return object.__getattribute__(self, '_observedcall')(
765 return object.__getattribute__(self, '_observedcall')(
766 'recvfrom', *args, **kwargs
766 'recvfrom', *args, **kwargs
767 )
767 )
768
768
769 def recvfrom_into(self, *args, **kwargs):
769 def recvfrom_into(self, *args, **kwargs):
770 return object.__getattribute__(self, '_observedcall')(
770 return object.__getattribute__(self, '_observedcall')(
771 'recvfrom_into', *args, **kwargs
771 'recvfrom_into', *args, **kwargs
772 )
772 )
773
773
774 def recv_into(self, *args, **kwargs):
774 def recv_into(self, *args, **kwargs):
775 return object.__getattribute__(self, '_observedcall')(
775 return object.__getattribute__(self, '_observedcall')(
776 'recv_into', *args, **kwargs
776 'recv_into', *args, **kwargs
777 )
777 )
778
778
779 def send(self, *args, **kwargs):
779 def send(self, *args, **kwargs):
780 return object.__getattribute__(self, '_observedcall')(
780 return object.__getattribute__(self, '_observedcall')(
781 'send', *args, **kwargs
781 'send', *args, **kwargs
782 )
782 )
783
783
784 def sendall(self, *args, **kwargs):
784 def sendall(self, *args, **kwargs):
785 return object.__getattribute__(self, '_observedcall')(
785 return object.__getattribute__(self, '_observedcall')(
786 'sendall', *args, **kwargs
786 'sendall', *args, **kwargs
787 )
787 )
788
788
789 def sendto(self, *args, **kwargs):
789 def sendto(self, *args, **kwargs):
790 return object.__getattribute__(self, '_observedcall')(
790 return object.__getattribute__(self, '_observedcall')(
791 'sendto', *args, **kwargs
791 'sendto', *args, **kwargs
792 )
792 )
793
793
794 def setblocking(self, *args, **kwargs):
794 def setblocking(self, *args, **kwargs):
795 return object.__getattribute__(self, '_observedcall')(
795 return object.__getattribute__(self, '_observedcall')(
796 'setblocking', *args, **kwargs
796 'setblocking', *args, **kwargs
797 )
797 )
798
798
799 def settimeout(self, *args, **kwargs):
799 def settimeout(self, *args, **kwargs):
800 return object.__getattribute__(self, '_observedcall')(
800 return object.__getattribute__(self, '_observedcall')(
801 'settimeout', *args, **kwargs
801 'settimeout', *args, **kwargs
802 )
802 )
803
803
804 def gettimeout(self, *args, **kwargs):
804 def gettimeout(self, *args, **kwargs):
805 return object.__getattribute__(self, '_observedcall')(
805 return object.__getattribute__(self, '_observedcall')(
806 'gettimeout', *args, **kwargs
806 'gettimeout', *args, **kwargs
807 )
807 )
808
808
809 def setsockopt(self, *args, **kwargs):
809 def setsockopt(self, *args, **kwargs):
810 return object.__getattribute__(self, '_observedcall')(
810 return object.__getattribute__(self, '_observedcall')(
811 'setsockopt', *args, **kwargs
811 'setsockopt', *args, **kwargs
812 )
812 )
813
813
814
814
815 class baseproxyobserver:
815 class baseproxyobserver:
816 def __init__(self, fh, name, logdata, logdataapis):
816 def __init__(self, fh, name, logdata, logdataapis):
817 self.fh = fh
817 self.fh = fh
818 self.name = name
818 self.name = name
819 self.logdata = logdata
819 self.logdata = logdata
820 self.logdataapis = logdataapis
820 self.logdataapis = logdataapis
821
821
822 def _writedata(self, data):
822 def _writedata(self, data):
823 if not self.logdata:
823 if not self.logdata:
824 if self.logdataapis:
824 if self.logdataapis:
825 self.fh.write(b'\n')
825 self.fh.write(b'\n')
826 self.fh.flush()
826 self.fh.flush()
827 return
827 return
828
828
829 # Simple case writes all data on a single line.
829 # Simple case writes all data on a single line.
830 if b'\n' not in data:
830 if b'\n' not in data:
831 if self.logdataapis:
831 if self.logdataapis:
832 self.fh.write(b': %s\n' % stringutil.escapestr(data))
832 self.fh.write(b': %s\n' % stringutil.escapestr(data))
833 else:
833 else:
834 self.fh.write(
834 self.fh.write(
835 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
835 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
836 )
836 )
837 self.fh.flush()
837 self.fh.flush()
838 return
838 return
839
839
840 # Data with newlines is written to multiple lines.
840 # Data with newlines is written to multiple lines.
841 if self.logdataapis:
841 if self.logdataapis:
842 self.fh.write(b':\n')
842 self.fh.write(b':\n')
843
843
844 lines = data.splitlines(True)
844 lines = data.splitlines(True)
845 for line in lines:
845 for line in lines:
846 self.fh.write(
846 self.fh.write(
847 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
847 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
848 )
848 )
849 self.fh.flush()
849 self.fh.flush()
850
850
851
851
852 class fileobjectobserver(baseproxyobserver):
852 class fileobjectobserver(baseproxyobserver):
853 """Logs file object activity."""
853 """Logs file object activity."""
854
854
855 def __init__(
855 def __init__(
856 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
856 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
857 ):
857 ):
858 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
858 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
859 self.reads = reads
859 self.reads = reads
860 self.writes = writes
860 self.writes = writes
861
861
862 def read(self, res, size=-1):
862 def read(self, res, size=-1):
863 if not self.reads:
863 if not self.reads:
864 return
864 return
865 # Python 3 can return None from reads at EOF instead of empty strings.
865 # Python 3 can return None from reads at EOF instead of empty strings.
866 if res is None:
866 if res is None:
867 res = b''
867 res = b''
868
868
869 if size == -1 and res == b'':
869 if size == -1 and res == b'':
870 # Suppress pointless read(-1) calls that return
870 # Suppress pointless read(-1) calls that return
871 # nothing. These happen _a lot_ on Python 3, and there
871 # nothing. These happen _a lot_ on Python 3, and there
872 # doesn't seem to be a better workaround to have matching
872 # doesn't seem to be a better workaround to have matching
873 # Python 2 and 3 behavior. :(
873 # Python 2 and 3 behavior. :(
874 return
874 return
875
875
876 if self.logdataapis:
876 if self.logdataapis:
877 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
877 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
878
878
879 self._writedata(res)
879 self._writedata(res)
880
880
881 def readline(self, res, limit=-1):
881 def readline(self, res, limit=-1):
882 if not self.reads:
882 if not self.reads:
883 return
883 return
884
884
885 if self.logdataapis:
885 if self.logdataapis:
886 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
886 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
887
887
888 self._writedata(res)
888 self._writedata(res)
889
889
890 def readinto(self, res, dest):
890 def readinto(self, res, dest):
891 if not self.reads:
891 if not self.reads:
892 return
892 return
893
893
894 if self.logdataapis:
894 if self.logdataapis:
895 self.fh.write(
895 self.fh.write(
896 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
896 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
897 )
897 )
898
898
899 data = dest[0:res] if res is not None else b''
899 data = dest[0:res] if res is not None else b''
900
900
901 # _writedata() uses "in" operator and is confused by memoryview because
901 # _writedata() uses "in" operator and is confused by memoryview because
902 # characters are ints on Python 3.
902 # characters are ints on Python 3.
903 if isinstance(data, memoryview):
903 if isinstance(data, memoryview):
904 data = data.tobytes()
904 data = data.tobytes()
905
905
906 self._writedata(data)
906 self._writedata(data)
907
907
908 def write(self, res, data):
908 def write(self, res, data):
909 if not self.writes:
909 if not self.writes:
910 return
910 return
911
911
912 # Python 2 returns None from some write() calls. Python 3 (reasonably)
912 # Python 2 returns None from some write() calls. Python 3 (reasonably)
913 # returns the number of bytes written.
913 # returns the number of bytes written.
914 if res is None and data:
914 if res is None and data:
915 res = len(data)
915 res = len(data)
916
916
917 if self.logdataapis:
917 if self.logdataapis:
918 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
918 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
919
919
920 self._writedata(data)
920 self._writedata(data)
921
921
922 def flush(self, res):
922 def flush(self, res):
923 if not self.writes:
923 if not self.writes:
924 return
924 return
925
925
926 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
926 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
927
927
928 # For observedbufferedinputpipe.
928 # For observedbufferedinputpipe.
929 def bufferedread(self, res, size):
929 def bufferedread(self, res, size):
930 if not self.reads:
930 if not self.reads:
931 return
931 return
932
932
933 if self.logdataapis:
933 if self.logdataapis:
934 self.fh.write(
934 self.fh.write(
935 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
935 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
936 )
936 )
937
937
938 self._writedata(res)
938 self._writedata(res)
939
939
940 def bufferedreadline(self, res):
940 def bufferedreadline(self, res):
941 if not self.reads:
941 if not self.reads:
942 return
942 return
943
943
944 if self.logdataapis:
944 if self.logdataapis:
945 self.fh.write(
945 self.fh.write(
946 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
946 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
947 )
947 )
948
948
949 self._writedata(res)
949 self._writedata(res)
950
950
951
951
952 def makeloggingfileobject(
952 def makeloggingfileobject(
953 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
953 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
954 ):
954 ):
955 """Turn a file object into a logging file object."""
955 """Turn a file object into a logging file object."""
956
956
957 observer = fileobjectobserver(
957 observer = fileobjectobserver(
958 logh,
958 logh,
959 name,
959 name,
960 reads=reads,
960 reads=reads,
961 writes=writes,
961 writes=writes,
962 logdata=logdata,
962 logdata=logdata,
963 logdataapis=logdataapis,
963 logdataapis=logdataapis,
964 )
964 )
965 return fileobjectproxy(fh, observer)
965 return fileobjectproxy(fh, observer)
966
966
967
967
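(Illustrative sketch, not part of the diff: wrapping an in-memory file with
the logging proxy built above. The log output shown is approximate.)

    import io
    from mercurial import util

    log = io.BytesIO()
    fh = util.makeloggingfileobject(
        log, io.BytesIO(), b'clientio', logdata=True
    )
    fh.write(b'hello\n')
    fh.flush()
    # log.getvalue() now contains roughly:
    #   clientio> write(6) -> 6:
    #   clientio> hello\n
    #   clientio> flush() -> None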
968 class socketobserver(baseproxyobserver):
968 class socketobserver(baseproxyobserver):
969 """Logs socket activity."""
969 """Logs socket activity."""
970
970
971 def __init__(
971 def __init__(
972 self,
972 self,
973 fh,
973 fh,
974 name,
974 name,
975 reads=True,
975 reads=True,
976 writes=True,
976 writes=True,
977 states=True,
977 states=True,
978 logdata=False,
978 logdata=False,
979 logdataapis=True,
979 logdataapis=True,
980 ):
980 ):
981 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
981 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
982 self.reads = reads
982 self.reads = reads
983 self.writes = writes
983 self.writes = writes
984 self.states = states
984 self.states = states
985
985
986 def makefile(self, res, mode=None, bufsize=None):
986 def makefile(self, res, mode=None, bufsize=None):
987 if not self.states:
987 if not self.states:
988 return
988 return
989
989
990 self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))
990 self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))
991
991
992 def recv(self, res, size, flags=0):
992 def recv(self, res, size, flags=0):
993 if not self.reads:
993 if not self.reads:
994 return
994 return
995
995
996 if self.logdataapis:
996 if self.logdataapis:
997 self.fh.write(
997 self.fh.write(
998 b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
998 b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
999 )
999 )
1000 self._writedata(res)
1000 self._writedata(res)
1001
1001
1002 def recvfrom(self, res, size, flags=0):
1002 def recvfrom(self, res, size, flags=0):
1003 if not self.reads:
1003 if not self.reads:
1004 return
1004 return
1005
1005
1006 if self.logdataapis:
1006 if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom(%d, %d) -> %d'
                % (self.name, size, flags, len(res[0]))
            )

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom_into(%d, %d) -> %d'
                % (self.name, size, flags, res[0])
            )

        self._writedata(buf[0 : res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
            )

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        self.fh.write(
            b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
        )
        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write(
                b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
            )

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write(
                b'%s> sendto(%d, %d, %r) -> %d'
                % (self.name, len(data), flags, address, res)
            )

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        if not self.states:
            return

        self.fh.write(
            b'%s> setsockopt(%r, %r, %r) -> %r\n'
            % (self.name, level, optname, value, res)
        )


def makeloggingsocket(
    logh,
    fh,
    name,
    reads=True,
    writes=True,
    states=True,
    logdata=False,
    logdataapis=True,
):
    """Turn a socket into a logging socket."""

    observer = socketobserver(
        logh,
        name,
        reads=reads,
        writes=writes,
        states=states,
        logdata=logdata,
        logdataapis=logdataapis,
    )
    return socketproxy(fh, observer)


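# A hedged usage sketch (illustrative only; the socket, host, and log path
# below are hypothetical): makeloggingsocket wraps an existing socket so that
# every read/write/state call is mirrored to a log handle for debugging.
#
#   import socket
#   logh = open('/tmp/sock.log', 'wb')
#   conn = socket.create_connection(('example.com', 80))
#   conn = makeloggingsocket(logh, conn, b'client')
#   conn.sendall(b'GET / HTTP/1.0\r\n\r\n')

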
def version():
    """Return version information if available."""
    try:
        from . import __version__

        return __version__.version
    except ImportError:
        return b'unknown'


def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')

    >>> versiontuple(b'4.6rc0')
    (4, 6, None, 'rc0')
    >>> versiontuple(b'4.6rc0+12-425d55e54f98')
    (4, 6, None, 'rc0+12-425d55e54f98')
    >>> versiontuple(b'.1.2.3')
    (None, None, None, '.1.2.3')
    >>> versiontuple(b'12.34..5')
    (12, 34, None, '..5')
    >>> versiontuple(b'1.2.3.4.5.6')
    (1, 2, 3, '.4.5.6')
    """
    if not v:
        v = version()
    m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
    if not m:
        vparts, extra = b'', v
    elif m.group(2):
        vparts, extra = m.groups()
    else:
        vparts, extra = m.group(1), None

    assert vparts is not None  # help pytype

    vints = []
    for i in vparts.split(b'.'):
        try:
            vints.append(int(i))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)

    raise error.ProgrammingError(b"invalid version part request: %d" % n)


def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keyword args
    if func.__code__.co_argcount == 0:
        listcache = []

        def f():
            if len(listcache) == 0:
                listcache.append(func())
            return listcache[0]

        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]

    else:

        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f


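# Illustrative only (the wrapped function below is hypothetical): cachefunc
# memoizes by positional arguments, so a repeated call returns the cached
# result without re-invoking the wrapped function.
#
#   >>> calls = []
#   >>> def square(x):
#   ...     calls.append(x)
#   ...     return x * x
#   >>> fast = cachefunc(square)
#   >>> fast(3), fast(3), len(calls)
#   (9, 9, 1)

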
class cow:
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        if getattr(self, '_copied', 0):
            self._copied -= 1
            # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
            return self.__class__(self)  # pytype: disable=wrong-arg-count
        return self

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self


class sortdict(collections.OrderedDict):
    """a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    >>> d1.insert(1, b'a.5', 0.5)
    >>> d1
    sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
    """

    def __setitem__(self, key, value):
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src, **f):
            if isinstance(src, dict):
                src = src.items()
            for k, v in src:
                self[k] = v
            for k in f:
                self[k] = f[k]

    def insert(self, position, key, value):
        for (i, (k, v)) in enumerate(list(self.items())):
            if i == position:
                self[key] = value
            if i >= position:
                del self[k]
                self[k] = v


class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """


class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """


class transactional:  # pytype: disable=ignored-metaclass
    """Base class for making a transactional type into a context manager."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()


@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        tr.close()
        raise
    finally:
        tr.release()


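# Hedged usage sketch (the transaction and helper below are hypothetical):
# unlike a plain transactional context manager, this one also closes the
# transaction when InterventionRequired escapes, so partially applied work is
# committed for the user to resume rather than rolled back.
#
#   tr = repo.transaction(b'rebase')
#   with acceptintervention(tr):
#       run_steps()  # may raise error.InterventionRequired

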
@contextlib.contextmanager
def nullcontextmanager(enter_result=None):
    yield enter_result


class _lrucachenode:
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """

    __slots__ = ('next', 'prev', 'key', 'value', 'cost')

    def __init__(self):
        self.next = self
        self.prev = self

        self.key = _notset
        self.value = None
        self.cost = 0

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
        self.value = None
        self.cost = 0


class lrucachedict:
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.

    Items in the cache can be inserted with an optional "cost" value. This is
    simply an integer that is specified by the caller. The cache can be queried
    for the total cost of all items presently in the cache.

    The cache can also define a maximum cost. If a cache insertion would
    cause the total cost of the cache to go beyond the maximum cost limit,
    nodes will be evicted to make room for the new node. This can be used
    to e.g. set a max memory limit and associate an estimated bytes size
    cost to each item in the cache. By default, no maximum cost is enforced.
    """

    def __init__(self, max, maxcost=0):
        self._cache = {}

        self._head = _lrucachenode()
        self._size = 1
        self.capacity = max
        self.totalcost = 0
        self.maxcost = maxcost

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def insert(self, k, v, cost=0):
        """Insert a new item in the cache with optional cost value."""
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            self.totalcost -= node.cost
            node.value = v
            node.cost = cost
            self.totalcost += cost
            self._movetohead(node)

            if self.maxcost:
                self._enforcecostlimit()

            return

        if self._size < self.capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            self.totalcost -= node.cost
            del self._cache[node.key]

        node.key = k
        node.value = v
        node.cost = cost
        self.totalcost += cost
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

        if self.maxcost:
            self._enforcecostlimit()

    def __setitem__(self, k, v):
        self.insert(k, v)

    def __delitem__(self, k):
        self.pop(k)

    def pop(self, k, default=_notset):
        try:
            node = self._cache.pop(k)
        except KeyError:
            if default is _notset:
                raise
            return default

        assert node is not None  # help pytype
        value = node.value
        self.totalcost -= node.cost
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

        return value

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def peek(self, k, default=_notset):
        """Get the specified item without moving it to the head

        Unlike get(), this doesn't mutate the internal state. But be aware
        that it doesn't mean peek() is thread safe.
        """
        try:
            node = self._cache[k]
            assert node is not None  # help pytype
            return node.value
        except KeyError:
            if default is _notset:
                raise
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            self.totalcost -= n.cost
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self, capacity=None, maxcost=0):
        """Create a new cache as a copy of the current one.

        By default, the new cache has the same capacity as the existing one.
        But, the cache capacity can be changed as part of performing the
        copy.

        Items in the copy have an insertion/access order matching this
        instance.
        """

        capacity = capacity or self.capacity
        maxcost = maxcost or self.maxcost
        result = lrucachedict(capacity, maxcost=maxcost)

        # We copy entries by iterating in oldest-to-newest order so the copy
        # has the correct ordering.

        # Find the first non-empty entry.
        n = self._head.prev
        while n.key is _notset and n is not self._head:
            n = n.prev

        # We could potentially skip the first N items when decreasing capacity.
        # But let's keep it simple unless it is a performance problem.
        for i in range(len(self._cache)):
            result.insert(n.key, n.value, cost=n.cost)
            n = n.prev

        return result

    def popoldest(self):
        """Remove the oldest item from the cache.

        Returns the (key, value) describing the removed cache entry.
        """
        if not self._cache:
            return

        # Walk the linked list backwards starting at tail node until we hit
        # a non-empty node.
        n = self._head.prev

        assert n is not None  # help pytype

        while n.key is _notset:
            n = n.prev

        assert n is not None  # help pytype

        key, value = n.key, n.value

        # And remove it from the cache and mark it as empty.
        del self._cache[n.key]
        self.totalcost -= n.cost
        n.markempty()

        return key, value

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node

    def _enforcecostlimit(self):
        # This should run after an insertion. It should only be called if total
        # cost limits are being enforced.
        # The most recently inserted node is never evicted.
        if len(self) <= 1 or self.totalcost <= self.maxcost:
            return

        # This is logically equivalent to calling popoldest() until we
        # free up enough cost. We don't do that since popoldest() needs
        # to walk the linked list and doing this in a loop would be
        # quadratic. So we find the first non-empty node and then
        # walk nodes until we free up enough capacity.
        #
        # If we only removed the minimum number of nodes to free enough
        # cost at insert time, chances are high that the next insert would
        # also require pruning. This would effectively constitute quadratic
        # behavior for insert-heavy workloads. To mitigate this, we set a
        # target cost that is a percentage of the max cost. This will tend
        # to free more nodes when the high water mark is reached, which
        # lowers the chances of needing to prune on the subsequent insert.
        targetcost = int(self.maxcost * 0.75)

        n = self._head.prev
        while n.key is _notset:
            n = n.prev

        while len(self) > 1 and self.totalcost > targetcost:
            del self._cache[n.key]
            self.totalcost -= n.cost
            n.markempty()
            n = n.prev


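# Illustrative only: once total cost exceeds maxcost, insertion evicts the
# oldest entries down to ~75% of maxcost (the newest insert always survives).
#
#   d = lrucachedict(4, maxcost=100)
#   d.insert(b'a', b'value-a', cost=60)
#   d.insert(b'b', b'value-b', cost=60)  # total 120 > 100: b'a' is evicted
#   assert b'a' not in d and b'b' in d

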
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:

        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]

    else:

        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f


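# Illustrative only (the wrapped callable is hypothetical): unlike cachefunc,
# lrucachefunc bounds the cache at roughly 20 recent results, evicting the
# least recently used entry beyond that.
#
#   resolve = lrucachefunc(expensive_lookup)
#   resolve(b'key')  # computed once, then served from the bounded cache

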
class propertycache:
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value


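# Hedged sketch (the class and helper below are hypothetical): the descriptor
# runs once per instance, then stores the result in the instance __dict__,
# which shadows the descriptor on subsequent attribute reads.
#
#   class repoish:
#       @propertycache
#       def expensive(self):
#           return compute()  # only invoked on first access

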
def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    prop = pycompat.sysstr(prop)
    if prop in obj.__dict__:
        del obj.__dict__[prop]


def increasingchunks(source, min=1024, max=65536):
    """return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max"""

    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield b''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield b''.join(buf)


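# Illustrative only: chunk sizes grow from min toward max as data streams
# through, amortizing per-chunk overhead for large sources.
#
#   >>> src = [b'x' * 500 for _ in range(10)]
#   >>> [len(c) for c in increasingchunks(src, min=1024, max=4096)]
#   [1500, 2500, 1000]

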
def always(fn):
    return True


def never(fn):
    return False


def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7, but it still affects
    CPython's performance.
    """

    def wrapper(*args, **kwargs):
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if gcenabled:
                gc.enable()

    return wrapper


if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    nogc = lambda x: x


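# Hedged sketch (the helper below is hypothetical): nogc is meant to decorate
# container-building hot paths; GC is re-enabled afterwards only if it was
# enabled on entry, so nesting is safe.
#
#   @nogc
#   def buildmap(items):
#       return {k: v for k, v in items}

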
def pathto(root, n1, n2):
    # type: (bytes, bytes, bytes) -> bytes
    """return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    """
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = b'/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split(b'/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'


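# Illustrative only (assuming a POSIX os.sep): the shared prefix of n1 and n2
# is stripped and the remaining components of n1 become '..' hops.
#
#   pathto(b'/repo', b'a/b', b'a/c/d')   # -> b'../c/d'

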
def checksignature(func, depth=1):
    '''wrap a function with code to check for calling errors'''

    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
                raise error.SignatureError
            raise

    return check


# a whitelist of known filesystems where hardlinks work reliably
_hardlinkfswhitelist = {
    b'apfs',
    b'btrfs',
    b'ext2',
    b'ext3',
    b'ext4',
    b'hfs',
    b'jfs',
    b'NTFS',
    b'reiserfs',
    b'tmpfs',
    b'ufs',
    b'xfs',
    b'zfs',
}


def copyfile(
    src,
    dest,
    hardlink=False,
    copystat=False,
    checkambig=False,
    nb_bytes=None,
    no_hardlink_cb=None,
    check_fs_hardlink=True,
):
    """copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.

    nb_bytes: if set only copy the first `nb_bytes` of the source file.
    """
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink and check_fs_hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            if no_hardlink_cb is not None:
                no_hardlink_cb()
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            if nb_bytes is not None:
                m = "the `nb_bytes` argument is incompatible with `hardlink`"
                raise error.ProgrammingError(m)
            return
        except (IOError, OSError) as exc:
            if exc.errno != errno.EEXIST and no_hardlink_cb is not None:
                no_hardlink_cb()
            # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
        if nb_bytes is not None:
            m = "cannot use `nb_bytes` on a symlink"
            raise error.ProgrammingError(m)
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat.frompath(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (
                            oldstat.stat[stat.ST_MTIME] + 1
                        ) & 0x7FFFFFFF
                        os.utime(dest, (advanced, advanced))
            # We could do something smarter using `copy_file_range` call or similar
            if nb_bytes is not None:
                with open(dest, mode='r+') as f:
                    f.truncate(nb_bytes)
        except shutil.Error as inst:
            raise error.Abort(stringutil.forcebytestr(inst))


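# Hedged usage sketch (the paths below are hypothetical): request a hardlink
# but accept a silent fallback to a plain copy on non-whitelisted filesystems.
#
#   copyfile(b'.hg/store/data/foo.i', b'backup/foo.i', hardlink=True)

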
def copyfiles(src, dst, hardlink=None, progress=None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    def settopic():
        if progress:
            progress.topic = _(b'linking') if hardlink else _(b'copying')

    if os.path.isdir(src):
        if hardlink is None:
            hardlink = (
                os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
            num += n
    else:
        if hardlink is None:
            hardlink = (
                os.stat(os.path.dirname(src)).st_dev
                == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError) as exc:
                if exc.errno != errno.EEXIST:
                    hardlink = False
                # XXX maybe try to relink if the file exists?
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        if progress:
            progress.increment()

    return hardlink, num


_winreservednames = {
    b'con',
    b'prn',
    b'aux',
    b'nul',
    b'com1',
    b'com2',
    b'com3',
    b'com4',
    b'com5',
    b'com6',
    b'com7',
    b'com8',
    b'com9',
    b'lpt1',
    b'lpt2',
    b'lpt3',
    b'lpt4',
    b'lpt5',
    b'lpt6',
    b'lpt7',
    b'lpt8',
    b'lpt9',
}
_winreservedchars = b':*?"<>|'


def checkwinfilename(path):
    # type: (bytes) -> Optional[bytes]
    r"""Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    """
    if path.endswith(b'\\'):
        return _(b"filename ends with '\\', which is invalid on Windows")
    if b'\\/' in path:
        return _(b"directory name ends with '\\', which is invalid on Windows")
    for n in path.replace(b'\\', b'/').split(b'/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return (
                    _(
                        b"filename contains '%s', which is reserved "
                        b"on Windows"
                    )
                    % c
                )
            if ord(c) <= 31:
                return _(
                    b"filename contains '%s', which is invalid on Windows"
                ) % stringutil.escapestr(c)
        base = n.split(b'.')[0]
        if base and base.lower() in _winreservednames:
            return (
                _(b"filename contains '%s', which is reserved on Windows")
                % base
            )
        t = n[-1:]
        if t in b'. ' and n not in b'..':
            return (
                _(
                    b"filename ends with '%s', which is not allowed "
                    b"on Windows"
                )
                % t
            )


timer = getattr(time, "perf_counter", None)

if pycompat.iswindows:
    checkosfilename = checkwinfilename
    if not timer:
        timer = time.clock
else:
    # mercurial.windows doesn't have platform.checkosfilename
    checkosfilename = platform.checkosfilename  # pytype: disable=module-attr
    if not timer:
        timer = time.time


def makelock(info, pathname):
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError:  # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)


2155 def readlock(pathname):
2155 def readlock(pathname):
2156 # type: (bytes) -> bytes
2156 # type: (bytes) -> bytes
2157 try:
2157 try:
2158 return readlink(pathname)
2158 return readlink(pathname)
2159 except OSError as why:
2159 except OSError as why:
2160 if why.errno not in (errno.EINVAL, errno.ENOSYS):
2160 if why.errno not in (errno.EINVAL, errno.ENOSYS):
2161 raise
2161 raise
2162 except AttributeError: # no symlink in os
2162 except AttributeError: # no symlink in os
2163 pass
2163 pass
2164 with posixfile(pathname, b'rb') as fp:
2164 with posixfile(pathname, b'rb') as fp:
2165 return fp.read()
2165 return fp.read()
2166
2166
2167
2167
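# Illustrative sketch, not part of the original module: a makelock/readlock
# round trip. The temporary directory is only for the demo; where symlinks
# work the lock is a symlink, elsewhere the regular-file fallback is used.
def _demo_makelock():
    import tempfile

    d = tempfile.mkdtemp(dir=b'.')  # bytes dir in, bytes path out
    target = os.path.join(d, b'lock')
    makelock(b'example-host:12345', target)
    assert readlock(target) == b'example-host:12345'

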
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)


# File system features


def fscasesensitive(path):
    # type: (bytes) -> bool
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True  # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        return True


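# Illustrative sketch, not part of the original module: probing a freshly
# created path. The answer depends on the filesystem, so the demo only
# checks that a boolean comes back.
def _demo_fscasesensitive():
    import tempfile

    d = tempfile.mkdtemp(dir=b'.')
    probe = os.path.join(d, b'Probe')  # foldable final component
    os.mkdir(probe)
    # True on most Linux filesystems, False on default macOS/Windows volumes
    assert fscasesensitive(probe) in (True, False)

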
_re2_input = lambda x: x
try:
    import re2  # pytype: disable=import-error

    _re2 = None
except ImportError:
    _re2 = False


def has_re2():
    """return True if re2 is available, False otherwise"""
    if _re2 is None:
        _re._checkre2()
    return _re2


class _re:
    @staticmethod
    def _checkre2():
        global _re2
        global _re2_input
        if _re2 is not None:
            # we already have the answer
            return

        check_pattern = br'\[([^\[]+)\]'
        check_input = b'[ui]'
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(check_pattern, check_input))
        except ImportError:
            _re2 = False
        except TypeError:
            # the `pyre-2` project provides a re2 module that accepts bytes
            # the `fb-re2` project provides a re2 module that accepts sysstr
            check_pattern = pycompat.sysstr(check_pattern)
            check_input = pycompat.sysstr(check_input)
            _re2 = bool(re2.match(check_pattern, check_input))
            _re2_input = pycompat.sysstr

    def compile(self, pat, flags=0):
        """Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE."""
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            if flags & remod.IGNORECASE:
                pat = b'(?i)' + pat
            if flags & remod.MULTILINE:
                pat = b'(?m)' + pat
            try:
                return re2.compile(_re2_input(pat))
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        """Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        """
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape


re = _re()


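# Illustrative sketch, not part of the original module: the `re` instance
# above transparently prefers re2 when the optional module passes the
# probe, and falls back to the stdlib engine otherwise.
def _demo_re_wrapper():
    # has_re2() (the function this change adds) forces the probe and
    # reports availability without compiling anything else
    assert has_re2() in (True, False)
    # IGNORECASE is translated to an inline (?i) flag for re2
    pat = re.compile(br'[ui]', remod.IGNORECASE)
    assert pat.search(b'QUIET') is not None
    # escape comes from whichever engine was selected; the result still
    # matches the literal text under the stdlib engine
    esc = re.escape(b'a+b')
    assert remod.match(esc, b'a+b')

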
_fspathcache = {}


def fspath(name, root):
    # type: (bytes, bytes) -> bytes
    """Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    """

    def _makefspathcacheentry(dir):
        return {normcase(n): n for n in os.listdir(dir)}

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    seps.replace(b'\\', b'\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patch of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return b''.join(result)


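# Illustrative sketch, not part of the original module: on a
# case-insensitive filesystem, fspath maps a normcase-ed name back to the
# casing stored on disk. The repository path below is hypothetical.
def _demo_fspath():
    root = normcase(b'/some/repo')  # hypothetical; must exist on disk
    if os.path.lexists(root) and not fscasesensitive(root):
        stored = fspath(normcase(b'readme.txt'), root)
        # e.g. b'README.txt', exactly as the directory listing spells it
        assert normcase(stored) == normcase(b'readme.txt')

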
def checknlink(testfile):
    # type: (bytes) -> bool
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        fd, f1 = pycompat.mkstemp(
            prefix=b'.%s-' % os.path.basename(testfile),
            suffix=b'1~',
            dir=os.path.dirname(testfile),
        )
        os.close(fd)
        f2 = b'%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fp is not None:
            fp.close()
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass


def endswithsep(path):
    # type: (bytes) -> bool
    '''Check path ends with os.sep or os.altsep.'''
    return bool(  # help pytype
        path.endswith(pycompat.ossep)
        or pycompat.osaltsep
        and path.endswith(pycompat.osaltsep)
    )


def splitpath(path):
    # type: (bytes) -> List[bytes]
    """Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative to a simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed."""
    return path.split(pycompat.ossep)


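# Illustrative sketch, not part of the original module: splitpath
# deliberately honors os.sep only, so callers normalize via
# os.path.normpath() first when os.altsep may appear in the input.
def _demo_splitpath():
    p = pycompat.ossep.join([b'a', b'b', b'c'])
    assert splitpath(p) == [b'a', b'b', b'c']

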
def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode, enforcewritable)

    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, b"rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, b"wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:  # re-raises
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp


class filestat:
    """help to exactly detect change of a file

    The 'stat' attribute is the result of 'os.stat()' if the specified
    'path' exists. Otherwise, it is None. This can avoid a preparative
    'exists()' examination on the client side of this class.
    """

    def __init__(self, stat):
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        try:
            stat = os.stat(path)
        except FileNotFoundError:
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls, fp):
        stat = os.fstat(fp.fileno())
        return cls(stat)

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (
                self.stat.st_size == old.stat.st_size
                and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
                and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
            )
        except AttributeError:
            pass
        try:
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime  < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime  < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime  > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime  > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more within
        the same second (= S[n-1].ctime), and comparison of timestamps
        is ambiguous.

        The basic idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        changes due to collisions between such mtimes.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if the size of a file isn't changed.
        """
        try:
            return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
        try:
            os.utime(path, (advanced, advanced))
        except PermissionError:
            # utime() on the file created by another user causes EPERM,
            # if a process doesn't have appropriate privileges
            return False
        return True

    def __ne__(self, other):
        return not self == other


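# Illustrative sketch, not part of the original module: the intended
# write-then-compare cycle. When the second stat is ambiguous against the
# first (same ctime second), the mtime is nudged forward.
def _demo_filestat():
    import tempfile

    fd, path = tempfile.mkstemp(dir=b'.')
    os.close(fd)
    old = filestat.frompath(path)
    writefile(path, b'new content')  # writefile is defined later in util
    new = filestat.frompath(path)
    if new.isambig(old):
        new.avoidambig(path, old)  # may return False without privileges

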
class atomictempfile:
    """writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    The checkambig argument of the constructor is used with filestat, and
    is useful only if the target file is guarded by a lock (e.g. repo.lock
    or repo.wlock).
    """

    def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
        self.__name = name  # permanent name
        self._tempname = mktempcopy(
            name,
            emptyok=(b'w' in mode),
            createmode=createmode,
            enforcewritable=(b'w' in mode),
        )

        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.writelines = self._fp.writelines
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'):  # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        if exctype is not None:
            self.discard()
        else:
            self.close()


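# Illustrative sketch, not part of the original module: as a context
# manager, a clean exit renames the temp copy over the target, while an
# exception discards every write and leaves the original untouched.
def _demo_atomictempfile():
    target = b'example.txt'  # hypothetical target file
    with atomictempfile(target, checkambig=False) as fp:
        fp.write(b'all or nothing\n')

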
def tryrmdir(f):
    try:
        removedirs(f)
    except OSError as e:
        if e.errno != errno.ENOENT and e.errno != errno.ENOTEMPTY:
            raise


def unlinkpath(f, ignoremissing=False, rmdir=True):
    # type: (bytes, bool, bool) -> None
    """unlink and remove the directory if it is empty"""
    if ignoremissing:
        tryunlink(f)
    else:
        unlink(f)
    if rmdir:
        # try removing directories that might now be empty
        try:
            removedirs(os.path.dirname(f))
        except OSError:
            pass


def tryunlink(f):
    # type: (bytes) -> None
    """Attempt to remove a file, ignoring FileNotFoundError."""
    try:
        unlink(f)
    except FileNotFoundError:
        pass


def makedirs(name, mode=None, notindexed=False):
    # type: (bytes, Optional[int], bool) -> None
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)


def readfile(path):
    # type: (bytes) -> bytes
    with open(path, b'rb') as fp:
        return fp.read()


def writefile(path, text):
    # type: (bytes, bytes) -> None
    with open(path, b'wb') as fp:
        fp.write(text)


def appendfile(path, text):
    # type: (bytes, bytes) -> None
    with open(path, b'ab') as fp:
        fp.write(text)


class chunkbuffer:
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""

        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2 ** 20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk

        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read l bytes of data from the iterator of chunks of data.
        Returns less than l bytes if the iterator runs dry.

        If l is omitted, read everything"""
        if l is None:
            return b''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2 ** 18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset : offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return b''.join(buf)


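# Illustrative sketch, not part of the original module: reads may span
# chunk boundaries or stop mid-chunk; _chunkoffset tracks the partially
# consumed head of the queue.
def _demo_chunkbuffer():
    buf = chunkbuffer(iter([b'abc', b'defg', b'h']))
    assert buf.read(4) == b'abcd'  # spans the first two chunks
    assert buf.read(4) == b'efgh'  # resumes mid-chunk, then drains b'h'
    assert buf.read(4) == b''      # iterator exhausted

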
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file, size
    (default 131072) bytes at a time, up to the optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s


class cappedreader:
    """A file object proxy that allows reading up to N bytes.

    Given a source file object, instances of this type allow reading up to
    N bytes from that source file object. Attempts to read past the allowed
    limit are treated as EOF.

    It is assumed that I/O is not performed on the original file object
    in addition to I/O that is performed by this instance. If there is,
    state tracking will get out of sync and unexpected results will ensue.
    """

    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        if not self._left:
            return b''

        if n < 0:
            n = self._left

        data = self._fh.read(min(n, self._left))
        self._left -= len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        res = self.read(len(b))
        if res is None:
            return None

        b[0 : len(res)] = res
        return len(res)


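# Illustrative sketch, not part of the original module: reads are served
# from the wrapped file object until the cap is hit, after which the
# reader behaves as if at EOF even though the source has more data.
def _demo_cappedreader():
    import io

    src = io.BytesIO(b'0123456789')
    capped = cappedreader(src, 4)
    assert capped.read() == b'0123'  # n < 0 means "up to the cap"
    assert capped.read(1) == b''     # cap exhausted: EOF

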
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        for multiplier, divisor, format in unittable:
            if abs(count) >= divisor * multiplier:
                return format % (count / float(divisor))
        return unittable[-1][2] % count

    return go


def processlinerange(fromline, toline):
    # type: (int, int) -> Tuple[int, int]
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    if toline - fromline < 0:
        raise error.ParseError(_(b"line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_(b"fromline must be strictly positive"))
    return fromline - 1, toline


bytecount = unitcountfn(
    (100, 1 << 30, _(b'%.0f GB')),
    (10, 1 << 30, _(b'%.1f GB')),
    (1, 1 << 30, _(b'%.2f GB')),
    (100, 1 << 20, _(b'%.0f MB')),
    (10, 1 << 20, _(b'%.1f MB')),
    (1, 1 << 20, _(b'%.2f MB')),
    (100, 1 << 10, _(b'%.0f KB')),
    (10, 1 << 10, _(b'%.1f KB')),
    (1, 1 << 10, _(b'%.2f KB')),
    (1, 1, _(b'%.0f bytes')),
)


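# Illustrative sketch, not part of the original module: unitcountfn picks
# the first (multiplier, divisor) row whose threshold the value meets, so
# precision shrinks as magnitude grows. Assumes untranslated messages.
def _demo_bytecount():
    assert bytecount(0) == b'0 bytes'
    assert bytecount(2048) == b'2.00 KB'
    assert bytecount(5 << 20) == b'5.00 MB'

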
class transformingwriter:
    """Writable file wrapper to transform data by function"""

    def __init__(self, fp, encode):
        self._fp = fp
        self._encode = encode

    def close(self):
        self._fp.close()

    def flush(self):
        self._fp.flush()

    def write(self, data):
        return self._fp.write(self._encode(data))


# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
_eolre = remod.compile(br'\r*\n')


def tolf(s):
    # type: (bytes) -> bytes
    return _eolre.sub(b'\n', s)


def tocrlf(s):
    # type: (bytes) -> bytes
    return _eolre.sub(b'\r\n', s)


def _crlfwriter(fp):
    return transformingwriter(fp, tocrlf)


if pycompat.oslinesep == b'\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity


# TODO delete since workaround variant for Python 2 no longer needed.
def iterfile(fp):
    return fp


def iterlines(iterator):
    # type: (Iterable[bytes]) -> Iterator[bytes]
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line


def expandpath(path):
    # type: (bytes) -> bytes
    return os.path.expanduser(os.path.expandvars(path))


def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = b'|'.join(mapping.keys())
    if escape_prefix:
        patterns += b'|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = remod.compile(br'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)


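# Illustrative sketch, not part of the original module: $-style expansion
# with escape_prefix letting a doubled '$$' stand for a literal '$'. The
# two-character prefix escapes '$' for the regexp engine.
def _demo_interpolate():
    mapping = {b'user': b'alice', b'repo': b'hg'}
    out = interpolate(br'\$', mapping, b'$user@$repo', escape_prefix=True)
    assert out == b'alice@hg'
    assert interpolate(br'\$', mapping, b'$$5', escape_prefix=True) == b'$5'

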
timecount = unitcountfn(
    (1, 1e3, _(b'%.0f s')),
    (100, 1, _(b'%.1f s')),
    (10, 1, _(b'%.2f s')),
    (1, 1, _(b'%.3f s')),
    (100, 0.001, _(b'%.1f ms')),
    (10, 0.001, _(b'%.2f ms')),
    (1, 0.001, _(b'%.3f ms')),
    (100, 0.000001, _(b'%.1f us')),
    (10, 0.000001, _(b'%.2f us')),
    (1, 0.000001, _(b'%.3f us')),
    (100, 0.000000001, _(b'%.1f ns')),
    (10, 0.000000001, _(b'%.2f ns')),
    (1, 0.000000001, _(b'%.3f ns')),
)


@attr.s
class timedcmstats:
    """Stats information produced by the timedcm context manager on entering."""

    # the starting value of the timer as a float (meaning and resolution is
    # platform dependent, see util.timer)
    start = attr.ib(default=attr.Factory(lambda: timer()))
    # the number of seconds as a floating point value; starts at 0, updated when
    # the context is exited.
    elapsed = attr.ib(default=0)
    # the number of nested timedcm context managers.
    level = attr.ib(default=1)

    def __bytes__(self):
        return timecount(self.elapsed) if self.elapsed else b'<unknown>'

    __str__ = encoding.strmethod(__bytes__)


@contextlib.contextmanager
def timedcm(whencefmt, *whenceargs):
    """A context manager that produces timing information for a given context.

    On entering a timedcmstats instance is produced.

    This context manager is reentrant.

    """
    # track nested context managers
    timedcm._nested += 1
    timing_stats = timedcmstats(level=timedcm._nested)
    try:
        with tracing.log(whencefmt, *whenceargs):
            yield timing_stats
    finally:
        timing_stats.elapsed = timer() - timing_stats.start
        timedcm._nested -= 1


timedcm._nested = 0


def timed(func):
    """Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

        @util.timed
        def foo(a, b, c):
            pass
    """

    def wrapper(*args, **kwargs):
        with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
            result = func(*args, **kwargs)
        stderr = procutil.stderr
        stderr.write(
            b'%s%s: %s\n'
            % (
                b' ' * time_stats.level * 2,
                pycompat.bytestr(func.__name__),
                time_stats,
            )
        )
        return result

    return wrapper


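# Illustrative sketch, not part of the original module: timedcm hands out
# the stats object up front and fills in elapsed when the block exits,
# which is what @timed uses for its stderr report.
def _demo_timedcm():
    with timedcm(b'demo %s', b'step') as stats:
        time.sleep(0.01)
    assert stats.elapsed > 0
    assert stats.level == 1  # not nested inside another timedcm

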
_sizeunits = (
    (b'm', 2 ** 20),
    (b'k', 2 ** 10),
    (b'g', 2 ** 30),
    (b'kb', 2 ** 10),
    (b'mb', 2 ** 20),
    (b'gb', 2 ** 30),
    (b'b', 1),
)


def sizetoint(s):
    # type: (bytes) -> int
    """Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    """
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[: -len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_(b"couldn't parse size: %s") % s)


class hooks:
    """A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources."""

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results


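# Illustrative sketch, not part of the original module: hooks run in
# lexicographic order of their source names, not insertion order.
def _demo_hooks():
    h = hooks()
    h.add(b'b-source', lambda x: x + 1)
    h.add(b'a-source', lambda x: x * 2)
    assert h(3) == [6, 4]  # a-source fires first

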
3093 def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
3100 def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
3094 """Yields lines for a nicely formatted stacktrace.
3101 """Yields lines for a nicely formatted stacktrace.
3095 Skips the 'skip' last entries, then return the last 'depth' entries.
3102 Skips the 'skip' last entries, then return the last 'depth' entries.
3096 Each file+linenumber is formatted according to fileline.
3103 Each file+linenumber is formatted according to fileline.
3097 Each line is formatted according to line.
3104 Each line is formatted according to line.
3098 If line is None, it yields:
3105 If line is None, it yields:
3099 length of longest filepath+line number,
3106 length of longest filepath+line number,
3100 filepath+linenumber,
3107 filepath+linenumber,
3101 function
3108 function
3102
3109
3103 Not be used in production code but very convenient while developing.
3110 Not be used in production code but very convenient while developing.
3104 """
3111 """
3105 entries = [
3112 entries = [
3106 (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3113 (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3107 for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
3114 for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
3108 ][-depth:]
3115 ][-depth:]
3109 if entries:
3116 if entries:
3110 fnmax = max(len(entry[0]) for entry in entries)
3117 fnmax = max(len(entry[0]) for entry in entries)
3111 for fnln, func in entries:
3118 for fnln, func in entries:
3112 if line is None:
3119 if line is None:
3113 yield (fnmax, fnln, func)
3120 yield (fnmax, fnln, func)
3114 else:
3121 else:
3115 yield line % (fnmax, fnln, func)
3122 yield line % (fnmax, fnln, func)
3116
3123
3117
3124
3118 def debugstacktrace(
3125 def debugstacktrace(
3119 msg=b'stacktrace',
3126 msg=b'stacktrace',
3120 skip=0,
3127 skip=0,
3121 f=procutil.stderr,
3128 f=procutil.stderr,
3122 otherf=procutil.stdout,
3129 otherf=procutil.stdout,
3123 depth=0,
3130 depth=0,
3124 prefix=b'',
3131 prefix=b'',
3125 ):
3132 ):
3126 """Writes a message to f (stderr) with a nicely formatted stacktrace.
3133 """Writes a message to f (stderr) with a nicely formatted stacktrace.
3127 Skips the 'skip' entries closest to the call, then show 'depth' entries.
3134 Skips the 'skip' entries closest to the call, then show 'depth' entries.
3128 By default it will flush stdout first.
3135 By default it will flush stdout first.
3129 It can be used everywhere and intentionally does not require an ui object.
3136 It can be used everywhere and intentionally does not require an ui object.
3130 Not be used in production code but very convenient while developing.
3137 Not be used in production code but very convenient while developing.
3131 """
3138 """
3132 if otherf:
3139 if otherf:
3133 otherf.flush()
3140 otherf.flush()
3134 f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
3141 f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
3135 for line in getstackframes(skip + 1, depth=depth):
3142 for line in getstackframes(skip + 1, depth=depth):
3136 f.write(prefix + line)
3143 f.write(prefix + line)
3137 f.flush()
3144 f.flush()
3138
3145
3139
3146
3140 # convenient shortcut
3147 # convenient shortcut
3141 dst = debugstacktrace
3148 dst = debugstacktrace
3142
3149
3143
3150
def safename(f, tag, ctx, others=None):
    """
    Generate a name that is safe to rename f to in the given context.

    f: filename to rename
    tag: a string tag that will be included in the new name
    ctx: a context, in which the new name must not exist
    others: a set of other filenames that the new name must not be in

    Returns a file name of the form oldname~tag[~number] which does not exist
    in the provided context and is not in the set of other names.
    """
    if others is None:
        others = set()

    fn = b'%s~%s' % (f, tag)
    if fn not in ctx and fn not in others:
        return fn
    for n in itertools.count(1):
        fn = b'%s~%s~%d' % (f, tag, n)
        if fn not in ctx and fn not in others:
            return fn


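# Illustration only (not part of the original module): safename() only needs
# membership tests on ``ctx``, so plain sets stand in for a real changectx in
# this sketch; all file names here are hypothetical.
def _safename_example():
    ctx = {b'a.txt', b'a.txt~merge'}  # names already taken in the context
    taken = {b'a.txt~merge~1'}  # other reserved names
    # b'a.txt~merge' and b'a.txt~merge~1' both collide, so the next free
    # candidate is returned:
    return safename(b'a.txt', b'merge', ctx, others=taken)  # b'a.txt~merge~2'

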
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if fewer were available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(
            _(b"stream ended unexpectedly (got %d bytes, expected %d)")
            % (len(s), n)
        )
    return s


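# Illustration only (not part of the original module): readexactly() either
# returns exactly n bytes or aborts, so callers never have to handle a short
# read themselves.
def _readexactly_example():
    from io import BytesIO

    assert readexactly(BytesIO(b'abcdef'), 4) == b'abcd'
    # readexactly(BytesIO(b'ab'), 4) would raise error.Abort:
    #   stream ended unexpectedly (got 2 bytes, expected 4)

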
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the integer's binary value, least significant group
    first.

    >>> uvarintencode(0)
    '\\x00'
    >>> uvarintencode(1)
    '\\x01'
    >>> uvarintencode(127)
    '\\x7f'
    >>> uvarintencode(1337)
    '\\xb9\\n'
    >>> uvarintencode(65536)
    '\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
        ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
    bits = value & 0x7F
    value >>= 7
    bytes = []
    while value:
        bytes.append(pycompat.bytechr(0x80 | bits))
        bits = value & 0x7F
        value >>= 7
    bytes.append(pycompat.bytechr(bits))

    return b''.join(bytes)


def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
        ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    result = 0
    shift = 0
    while True:
        byte = ord(readexactly(fh, 1))
        result |= (byte & 0x7F) << shift
        if not (byte & 0x80):
            return result
        shift += 7


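# Illustration only (not part of the original module): the two uvarint
# helpers above are inverses, which this sketch checks by round-tripping a
# few values through an in-memory stream.
def _uvarint_roundtrip_example():
    from io import BytesIO

    for value in (0, 127, 128, 1337, 2 ** 42):
        encoded = uvarintencode(value)  # e.g. 128 -> b'\x80\x01'
        assert uvarintdecodestream(BytesIO(encoded)) == value

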
# Passing the '' locale means that the locale should be set according to the
# user settings (environment variables).
# Python sometimes avoids setting the global locale settings. When interfacing
# with C code (e.g. the curses module or the Subversion bindings), the global
# locale settings must be initialized correctly. Python 2 does not initialize
# the global locale settings on interpreter startup. Python 3 sometimes
# initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
# explicitly initialize it to get consistent behavior if it's not already
# initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
# LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
# if we can remove this code.
@contextlib.contextmanager
def with_lc_ctype():
    oldloc = locale.setlocale(locale.LC_CTYPE, None)
    if oldloc == 'C':
        try:
            try:
                locale.setlocale(locale.LC_CTYPE, '')
            except locale.Error:
                # The likely case is that the locale from the environment
                # variables is unknown.
                pass
            yield
        finally:
            locale.setlocale(locale.LC_CTYPE, oldloc)
    else:
        yield


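# Illustration only (not part of the original module): a hypothetical call
# site wrapping locale-sensitive C-library work so LC_CTYPE matches the
# user's environment for the duration of the block.
def _with_lc_ctype_example():
    with with_lc_ctype():
        # e.g. hand strings to curses or the Subversion bindings here; on
        # exit the previous LC_CTYPE setting is restored.
        pass

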
def _estimatememory():
    # type: () -> Optional[int]
    """Provide an estimate for the available system memory in bytes.

    If no estimate can be provided on the platform, returns None.
    """
    if pycompat.sysplatform.startswith(b'win'):
        # On Windows, use the GlobalMemoryStatusEx kernel function directly.
        from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
        from ctypes.wintypes import (  # pytype: disable=import-error
            Structure,
            byref,
            sizeof,
            windll,
        )

        class MEMORYSTATUSEX(Structure):
            _fields_ = [
                ('dwLength', DWORD),
                ('dwMemoryLoad', DWORD),
                ('ullTotalPhys', DWORDLONG),
                ('ullAvailPhys', DWORDLONG),
                ('ullTotalPageFile', DWORDLONG),
                ('ullAvailPageFile', DWORDLONG),
                ('ullTotalVirtual', DWORDLONG),
                ('ullAvailVirtual', DWORDLONG),
                ('ullExtendedVirtual', DWORDLONG),
            ]

        x = MEMORYSTATUSEX()
        x.dwLength = sizeof(x)
        windll.kernel32.GlobalMemoryStatusEx(byref(x))
        return x.ullAvailPhys

    # On newer Unix-like systems and macOS, the sysconf interface can be
    # used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES seems to be
    # implemented on most systems. Note that this yields the total physical
    # memory, which serves as an upper-bound estimate here.
    try:
        pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
        pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
        return pagesize * pages
    except OSError:  # sysconf can fail
        pass
    except KeyError:  # unknown parameter
        pass
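

# Illustration only (not part of the original module): a hypothetical caller
# degrading gracefully when no memory estimate is available; the 1 GiB
# fallback is an arbitrary example value, not anything the module prescribes.
def _estimatememory_example():
    estimate = _estimatememory()
    if estimate is None:
        # The platform gave no answer; fall back to a conservative default.
        return 1024 * 1024 * 1024
    return estimate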