##// END OF EJS Templates
peer: rename makepeer() → _make_peer()...
Manuel Jacob -
r51284:ed052780 default
parent child Browse files
Show More
@@ -1,4783 +1,4783 b''
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import binascii
10 10 import codecs
11 11 import collections
12 12 import contextlib
13 13 import difflib
14 14 import errno
15 15 import glob
16 16 import operator
17 17 import os
18 18 import platform
19 19 import random
20 20 import re
21 21 import socket
22 22 import ssl
23 23 import stat
24 24 import subprocess
25 25 import sys
26 26 import time
27 27
28 28 from .i18n import _
29 29 from .node import (
30 30 bin,
31 31 hex,
32 32 nullrev,
33 33 short,
34 34 )
35 35 from .pycompat import (
36 36 getattr,
37 37 open,
38 38 )
39 39 from . import (
40 40 bundle2,
41 41 bundlerepo,
42 42 changegroup,
43 43 cmdutil,
44 44 color,
45 45 context,
46 46 copies,
47 47 dagparser,
48 48 dirstateutils,
49 49 encoding,
50 50 error,
51 51 exchange,
52 52 extensions,
53 53 filemerge,
54 54 filesetlang,
55 55 formatter,
56 56 hg,
57 57 httppeer,
58 58 localrepo,
59 59 lock as lockmod,
60 60 logcmdutil,
61 61 mergestate as mergestatemod,
62 62 metadata,
63 63 obsolete,
64 64 obsutil,
65 65 pathutil,
66 66 phases,
67 67 policy,
68 68 pvec,
69 69 pycompat,
70 70 registrar,
71 71 repair,
72 72 repoview,
73 73 requirements,
74 74 revlog,
75 75 revset,
76 76 revsetlang,
77 77 scmutil,
78 78 setdiscovery,
79 79 simplemerge,
80 80 sshpeer,
81 81 sslutil,
82 82 streamclone,
83 83 strip,
84 84 tags as tagsmod,
85 85 templater,
86 86 treediscovery,
87 87 upgrade,
88 88 url as urlmod,
89 89 util,
90 90 verify,
91 91 vfs as vfsmod,
92 92 wireprotoframing,
93 93 wireprotoserver,
94 94 )
95 95 from .interfaces import repository
96 96 from .utils import (
97 97 cborutil,
98 98 compression,
99 99 dateutil,
100 100 procutil,
101 101 stringutil,
102 102 urlutil,
103 103 )
104 104
105 105 from .revlogutils import (
106 106 constants as revlog_constants,
107 107 debug as revlog_debug,
108 108 deltas as deltautil,
109 109 nodemap,
110 110 rewrite,
111 111 sidedata,
112 112 )
113 113
# Convenience alias: lock release helper re-exported from lockmod.
release = lockmod.release

# Command table for all debug* commands.  It is seeded with the commands
# registered by the strip module so they all share a single registrar.
table = {}
table.update(strip.command._table)
command = registrar.command(table)
119 119
120 120
@command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    nargs = len(args)
    if nargs == 3:
        # An explicit index file was given: open it as a standalone revlog.
        index, rev1, rev2 = args
        store = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = store.lookup
    elif nargs == 2:
        # No index file: fall back to the changelog of the current repo.
        if not repo:
            raise error.Abort(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        rev1, rev2 = args
        store = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_(b'either two or three arguments required'))
    ancnode = store.ancestor(lookup(rev1), lookup(rev2))
    ui.write(b'%d:%s\n' % (store.rev(ancnode), hex(ancnode)))
140 140
141 141
@command(b'debugantivirusrunning', [])
def debugantivirusrunning(ui, repo):
    """attempt to trigger an antivirus scanner to see if one is active"""
    # Write the standard (harmless) EICAR test pattern into the repo's cache
    # area; an active on-access scanner is expected to react to it.
    with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
        f.write(
            util.b85decode(
                # This is a base85-armored version of the EICAR test file. See
                # https://en.wikipedia.org/wiki/EICAR_test_file for details.
                b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
                b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
            )
        )
    # Give an AV engine time to scan the file.
    time.sleep(2)
    # If a scanner quarantined/removed the file, this cleanup will fail
    # loudly, revealing the scanner's presence.
    util.unlink(repo.cachevfs.join('eicar-test-file.com'))
157 157
158 158
@command(b'debugapplystreamclonebundle', [], b'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    # Open the bundle path, parse its header, then replay it onto the repo.
    fh = hg.openpath(ui, fname)
    bundle = exchange.readbundle(ui, fh, fname)
    bundle.apply(repo)
165 165
166 166
@command(
    b'debugbuilddag',
    [
        (
            b'm',
            b'mergeable-file',
            None,
            _(b'add single file mergeable changes'),
        ),
        (
            b'o',
            b'overwritten-file',
            None,
            _(b'add single file all revs overwrite'),
        ),
        (b'n', b'new-file', None, _(b'add new file at each rev')),
        (
            b'',
            b'from-existing',
            None,
            _(b'continue from a non-empty repository'),
        ),
    ],
    _(b'[OPTION]... [TEXT]'),
)
def debugbuilddag(
    ui,
    repo,
    text=None,
    mergeable_file=False,
    overwritten_file=False,
    new_file=False,
    from_existing=False,
):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_(b"reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0 and not from_existing:
        raise error.Abort(_(b'repository is not empty'))

    # determine number of revs in DAG (first parse pass, counting only)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == b'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [b'%d' % i for i in range(0, total * linesperrev)]
        initialmergedlines.append(b"")

    tags = []
    progress = ui.makeprogress(
        _(b'building'), unit=_(b'revisions'), total=total
    )
    # Second parse pass: actually create a commit for every 'n' event,
    # attach tags for 'l' events and switch branch on 'a' events.
    with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
        at = -1
        atbranch = b'default'
        nodeids = []
        id = 0
        progress.update(id)
        for type, data in dagparser.parsedag(text):
            if type == b'n':
                ui.note((b'node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = b"mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # merge: run an actual 3-way merge of the file so the
                        # result contains both parents' edits
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [
                            x[fn].data() for x in (pa, p1, p2)
                        ]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [
                            l.strip()
                            for l in simplemerge.render_minimized(m3)[0]
                        ]
                        ml.append(b"")
                    elif at > 0:
                        ml = p1[fn].data().split(b"\n")
                    else:
                        ml = initialmergedlines
                    # NOTE(review): when ml is initialmergedlines this mutates
                    # the shared list in place — confirm that is intended.
                    ml[id * linesperrev] += b" r%i" % id
                    mergedtext = b"\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = b"of"
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id

                if new_file:
                    fn = b"nf%i" % id
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        # carry over the per-rev files of the second parent
                        for fn in p2:
                            if fn.startswith(b"nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(
                            repo, cx, path, filecontent[path]
                        )
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(
                    repo,
                    pars,
                    b"r%i" % id,
                    files,
                    fctxfn,
                    date=(id, 0),
                    user=b"debugbuilddag",
                    extra={b'branch': atbranch},
                )
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == b'l':
                id, name = data
                ui.note((b'tag %s\n' % name))
                tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == b'a':
                ui.note((b'branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write(b"localtags", b"".join(tags))
350 350
351 351
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """Dump the contents of changegroup ``gen`` to the ui.

    With ``all`` set, one section per storage file (changelog, manifest,
    each filelog) is printed with full delta details; otherwise only the
    changelog node hashes are printed.  ``indent`` prefixes each output
    line with that many spaces (used when nested inside bundle2 output).
    """
    indent_string = b' ' * indent
    if all:
        ui.writenoi18n(
            b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
            % indent_string
        )

        def showchunks(named):
            # Print one header line for the section, then one line per delta.
            # Consuming gen.deltaiter() advances the underlying stream, so the
            # header calls below must stay in changelog/manifest/filelog order.
            ui.write(b"\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
                ui.write(
                    b"%s%s %s %s %s %s %d\n"
                    % (
                        indent_string,
                        hex(node),
                        hex(p1),
                        hex(p2),
                        hex(cs),
                        hex(deltabase),
                        len(delta),
                    )
                )

        gen.changelogheader()
        showchunks(b"changelog")
        gen.manifestheader()
        showchunks(b"manifest")
        # filelog sections repeat until the empty-header sentinel ({})
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata[b'filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_(b'use debugbundle2 for this file'))
        gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
            ui.write(b"%s%s\n" % (indent_string, hex(node)))
391 391
392 392
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    prefix = b' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # unreadable payload: report the unknown version and its size
        ui.write(
            b"%sunsupported version: %s (%d bytes)\n"
            % (prefix, exc.version, len(data))
        )
    else:
        ui.write(b"%sversion: %d (%d bytes)\n" % (prefix, version, len(data)))
        fm = ui.formatter(b'debugobsolete', opts)
        for rawmarker in sorted(markers):
            fm.startitem()
            fm.plain(prefix)
            cmdutil.showmarker(fm, obsutil.marker(None, rawmarker))
        fm.end()
415 415
416 416
def _debugphaseheads(ui, data, indent=0):
    """display the phase heads encoded in 'data', one head per line"""
    prefix = b' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        name = phases.phasenames[phase]
        for head in headsbyphase[phase]:
            ui.write(prefix)
            ui.write(b'%s %s\n' % (hex(head), name))
425 425
426 426
def _quasirepr(thing):
    """Return a stable, repr-like bytes rendering of *thing*.

    Mappings are rendered with their keys sorted so the output is
    deterministic; everything else falls back to repr().
    """
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        pairs = [b'%s: %s' % (key, thing[key]) for key in sorted(thing)]
        return b'{%s}' % b', '.join(pairs)
    return pycompat.bytestr(repr(thing))
433 433
434 434
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_(b'not a bundle2 file'))
    ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
    wanted = opts.get('part_type', [])
    for part in gen.iterparts():
        if wanted and part.type not in wanted:
            continue
        msg = b'%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        if part.type == b'changegroup':
            # getunbundler is constructed regardless of --quiet, matching
            # the original control flow.
            version = part.params.get(b'version', b'01')
            cg = changegroup.getunbundler(version, part, b'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        elif part.type == b'obsmarkers' and not ui.quiet:
            _debugobsmarkers(ui, part, indent=4, **opts)
        elif part.type == b'phase-heads' and not ui.quiet:
            _debugphaseheads(ui, part, indent=4)
457 457
458 458
@command(
    b'debugbundle',
    [
        (b'a', b'all', None, _(b'show all details')),
        (b'', b'part-type', [], _(b'show only the named part type')),
        (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
    ],
    _(b'FILE'),
    norepo=True,
)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        # --spec only needs the header, not the full contents
        if spec:
            ui.write(b'%s\n' % exchange.getbundlespec(ui, f))
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
481 481
482 482
@command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    try:
        caps = sorted(peer.capabilities())
        ui.writenoi18n(b'Main capabilities:\n')
        for cap in caps:
            ui.write(b' %s\n' % cap)
        b2caps = bundle2.bundle2caps(peer)
        if b2caps:
            ui.writenoi18n(b'Bundle2 capabilities:\n')
            for key, values in sorted(b2caps.items()):
                ui.write(b' %s\n' % key)
                for value in values:
                    ui.write(b' %s\n' % value)
    finally:
        # always release the peer's connection, even on error
        peer.close()
502 502
503 503
@command(
    b'debugchangedfiles',
    [
        (
            b'',
            b'compute',
            False,
            b"compute information instead of reading it from storage",
        ),
    ],
    b'REV',
)
def debugchangedfiles(ui, repo, rev, **opts):
    """list the stored files changes for a revision"""
    ctx = logcmdutil.revsingle(repo, rev, None)

    if opts['compute']:
        files = metadata.compute_all_files_changes(ctx)
    else:
        # read the pre-computed information from changelog sidedata, if any
        files = None
        sd = repo.changelog.sidedata(ctx.rev())
        if sd.get(sidedata.SD_FILES) is not None:
            files = metadata.decode_files_sidedata(sd)
    if files is None:
        return

    template = b"%-8s %2s: %s, %s;\n"
    for f in sorted(files.touched):
        if f in files.added:
            action = b"added"
        elif f in files.removed:
            action = b"removed"
        elif f in files.merged:
            action = b"merged"
        elif f in files.salvaged:
            action = b"salvaged"
        else:
            action = b"touched"

        copy_parent = b""
        copy_source = b""
        if f in files.copied_from_p1:
            copy_parent = b"p1"
            copy_source = files.copied_from_p1[f]
        elif f in files.copied_from_p2:
            copy_parent = b"p2"
            copy_source = files.copied_from_p2[f]

        ui.write(template % (action, copy_parent, f, copy_source))
553 553
554 554
@command(b'debugcheckstate', [], b'')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    if verify.verifier(repo)._verify_dirstate():
        raise error.Abort(
            _(b"dirstate inconsistent with current parent's manifest")
        )
562 562
563 563
@command(
    b'debugcolor',
    [(b'', b'style', None, _(b'show all configured styles'))],
    b'hg debugcolor',
)
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
    if opts.get('style'):
        return _debugdisplaystyle(ui)
    return _debugdisplaycolor(ui)
576 576
577 577
def _debugdisplaycolor(ui):
    """print every known color/effect name, each rendered in its own style"""
    # work on a copy so the caller's ui styles are left untouched
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for name, dummy in ui.configitems(b'color'):
            if name.startswith(b'color.'):
                ui._styles[name] = name[6:]
            elif name.startswith(b'terminfo.'):
                ui._styles[name] = name[9:]
    ui.write(_(b'available colors:\n'))

    # sort label with a '_' after the other to group '_background' entry.
    def sortkey(item):
        return (b'_' in item[0], item[0], item[1])

    for colorname, label in sorted(ui._styles.items(), key=sortkey):
        ui.write(b'%s\n' % colorname, label=label)
594 594
595 595
def _debugdisplaystyle(ui):
    """print each configured style label followed by its effects"""
    ui.write(_(b'available style:\n'))
    if not ui._styles:
        return
    # column width so the effect lists line up
    width = max(len(name) for name in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write(b'%s' % label, label=label)
        if effects:
            ui.write(b': ')
            ui.write(b' ' * (max(0, width - len(label))))
            rendered = [ui.label(e, e) for e in effects.split()]
            ui.write(b', '.join(rendered))
        ui.write(b'\n')
609 609
610 610
@command(b'debugcreatestreamclonebundle', [], b'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(
            _(
                b'(warning: stream clone bundle will contain secret '
                b'revisions)\n'
            )
        )

    # Use a local name that does not shadow the module-level `requirements`
    # import (mercurial.requirements).
    bundle_requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(
        _(b'bundle requirements: %s\n')
        % b', '.join(sorted(bundle_requirements))
    )
632 632
633 633
@command(
    b'debugdag',
    [
        (b't', b'tags', None, _(b'use tags as labels')),
        (b'b', b'branches', None, _(b'annotate with branch names')),
        (b'', b'dots', None, _(b'use dots for runs')),
        (b's', b'spaces', None, _(b'separate elements by spaces')),
    ],
    _(b'[OPTION]... [FILE [REV]...]'),
    optionalrepo=True,
)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # standalone revlog index: emit its DAG, labeling any explicitly
        # listed revision numbers as "rN"
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
        revs = {int(r) for r in revs}

        def events():
            # 'n' events carry (rev, [parent revs]); 'l' events attach labels
            for r in rlog:
                yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
                if r in revs:
                    yield b'l', (r, b"r%i" % r)

    elif repo:
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            # map rev -> list of tag names pointing at it
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)

        def events():
            b = b"default"
            for r in cl:
                if branches:
                    # emit an 'a' annotation whenever the branch changes
                    # between consecutive revisions
                    newb = cl.read(cl.node(r))[5][b'branch']
                    if newb != b:
                        yield b'a', newb
                        b = newb
                yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield b'l', (r, l)

    else:
        raise error.Abort(_(b'need repo for changelog dag'))

    for line in dagparser.dagtextlines(
        events(),
        addspaces=spaces,
        wraplabels=True,
        wrapannotations=True,
        wrapnonlinear=dots,
        usedots=dots,
        maxlinewidth=70,
    ):
        ui.write(line)
    ui.write(b"\n")
703 703
704 704
@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    storage_flag = (
        opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir')
    )
    if storage_flag:
        # with -c/-m/--dir the single positional argument is the revision
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    store = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    try:
        ui.write(store.rawdata(store.lookup(rev)))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
720 720
721 721
@command(
    b'debugdate',
    [(b'e', b'extended', None, _(b'try extended date formats'))],
    _(b'[-e] DATE [RANGE]'),
    norepo=True,
    optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        parsed = dateutil.parsedate(date, dateutil.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    # parsed is an (unixtime, offset) pair
    ui.writenoi18n(b"internal: %d %d\n" % parsed)
    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(parsed))
    if range:
        matcher = dateutil.matchdate(range)
        ui.writenoi18n(b"match: %s\n" % matcher(parsed[0]))
740 740
741 741
@command(
    b'debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``p1``: parent 1 revision number (for reference)
    :``p2``: parent 2 revision number (for reference)
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
                    - base: a full snapshot
                    - snap: an intermediate snapshot
                    - p1: a delta against the first parent
                    - p2: a delta against the second parent
                    - skip1: a delta against the same base as p1
                      (when p1 has an empty delta)
                    - skip2: a delta against the same base as p2
                      (when p2 has an empty delta)
                    - prev: a delta against the previous revision
                    - other: a delta against an arbitrary revision
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
                   (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r._generaldelta
    withsparseread = getattr(r, '_withsparseread', False)

    # security to avoid crash on corrupted revlogs
    total_revs = len(index)

    # memoized compressed chain size for revisions already visited; revinfo
    # below reuses it to avoid rescanning shared chain prefixes
    chain_size_cache = {}

    def revinfo(rev):
        # gather per-revision statistics straight from the index entry
        e = index[rev]
        compsize = e[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH]
        uncompsize = e[revlog_constants.ENTRY_DATA_UNCOMPRESSED_LENGTH]

        base = e[revlog_constants.ENTRY_DELTA_BASE]
        p1 = e[revlog_constants.ENTRY_PARENT_1]
        p2 = e[revlog_constants.ENTRY_PARENT_2]

        # If the parents of a revision has an empty delta, we never try to delta
        # against that parent, but directly against the delta base of that
        # parent (recursively). It avoids adding a useless entry in the chain.
        #
        # However we need to detect that as a special case for delta-type, that
        # is not simply "other".
        p1_base = p1
        if p1 != nullrev and p1 < total_revs:
            e1 = index[p1]
            while e1[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH] == 0:
                new_base = e1[revlog_constants.ENTRY_DELTA_BASE]
                if (
                    new_base == p1_base
                    or new_base == nullrev
                    or new_base >= total_revs
                ):
                    break
                p1_base = new_base
                e1 = index[p1_base]
        p2_base = p2
        if p2 != nullrev and p2 < total_revs:
            e2 = index[p2]
            while e2[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH] == 0:
                new_base = e2[revlog_constants.ENTRY_DELTA_BASE]
                if (
                    new_base == p2_base
                    or new_base == nullrev
                    or new_base >= total_revs
                ):
                    break
                p2_base = new_base
                e2 = index[p2_base]

        # classify how this revision's delta was computed (see docstring)
        if generaldelta:
            if base == p1:
                deltatype = b'p1'
            elif base == p2:
                deltatype = b'p2'
            elif base == rev:
                deltatype = b'base'
            elif base == p1_base:
                deltatype = b'skip1'
            elif base == p2_base:
                deltatype = b'skip2'
            elif r.issnapshot(rev):
                deltatype = b'snap'
            elif base == rev - 1:
                deltatype = b'prev'
            else:
                deltatype = b'other'
        else:
            if base == rev:
                deltatype = b'base'
            else:
                deltatype = b'prev'

        # sum compressed sizes along the chain, stopping early at a rev whose
        # chain size was already computed
        chain = r._deltachain(rev)[0]
        chain_size = 0
        for iter_rev in reversed(chain):
            cached = chain_size_cache.get(iter_rev)
            if cached is not None:
                chain_size += cached
                break
            e = index[iter_rev]
            chain_size += e[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH]
        chain_size_cache[rev] = chain_size

        return p1, p2, compsize, uncompsize, deltatype, chain, chain_size

    fm = ui.formatter(b'debugdeltachain', opts)

    fm.plain(
        b' rev p1 p2 chain# chainlen prev delta '
        b'size rawsize chainsize ratio lindist extradist '
        b'extraratio'
    )
    if withsparseread:
        fm.plain(b' readsize largestblk rddensity srchunks')
    fm.plain(b'\n')

    chainbases = {}
    for rev in r:
        p1, p2, comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        # chains are numbered in order of first appearance of their base
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            # a chain of length 1 has no previous revision
            prevrev = -1

        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write(
            b'rev p1 p2 chainid chainlen prevrev deltatype compsize '
            b'uncompsize chainsize chainratio lindist extradist '
            b'extraratio',
            b'%7d %7d %7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
            rev,
            p1,
            p2,
            chainid,
            len(chain),
            prevrev,
            deltatype,
            comp,
            uncomp,
            chainsize,
            chainratio,
            lineardist,
            extradist,
            extraratio,
            rev=rev,
            chainid=chainid,
            chainlen=len(chain),
            prevrev=prevrev,
            deltatype=deltatype,
            compsize=comp,
            uncompsize=uncomp,
            chainsize=chainsize,
            chainratio=chainratio,
            lindist=lineardist,
            extradist=extradist,
            extraratio=extraratio,
        )
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            # simulate the sparse read: total bytes and largest block that
            # would actually be read to reconstruct this chain
            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write(
                b'readsize largestblock readdensity srchunks',
                b' %10d %10d %9.5f %8d',
                readsize,
                largestblock,
                readdensity,
                srchunks,
                readsize=readsize,
                largestblock=largestblock,
                readdensity=readdensity,
                srchunks=srchunks,
            )

        fm.plain(b'\n')

    fm.end()
989 989
990 990
@command(
    b'debug-delta-find',
    cmdutil.debugrevlogopts
    + cmdutil.formatteropts
    + [
        (
            b'',
            b'source',
            b'full',
            _(b'input data feed to the process (full, storage, p1, p2, prev)'),
        ),
    ],
    _(b'-c|-m|FILE REV'),
    optionalrepo=True,
)
def debugdeltafind(ui, repo, arg_1, arg_2=None, source=b'full', **opts):
    """display the computation to get to a valid delta for storing REV

    This command will replay the process used to find the "best" delta to store
    a revision and display information about all the steps used to get to that
    result.

    By default, the process is fed with the full-text for the revision. This
    can be controlled with the --source flag.

    The revision uses the revision number of the target storage (not changelog
    revision number).

    note: the process is initiated from a full text of the revision to store.
    """
    opts = pycompat.byteskwargs(opts)
    # With one positional argument it is the revision; with two, the first is
    # the file and the second the revision.
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    rev = int(rev)

    # Use a local name that does not shadow the module-level `revlog` import.
    # NOTE(review): the b'debugdeltachain' command name passed to openrevlog
    # looks like a copy-paste from debugdeltachain — confirm whether error
    # messages should say b'debug-delta-find' instead.
    rl = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
    p1r, p2r = rl.parentrevs(rev)

    # map --source to the revision used as the delta base candidate
    if source == b'full':
        base_rev = nullrev
    elif source == b'storage':
        base_rev = rl.deltaparent(rev)
    elif source == b'p1':
        base_rev = p1r
    elif source == b'p2':
        base_rev = p2r
    elif source == b'prev':
        base_rev = rev - 1
    else:
        raise error.InputError(b"invalid --source value: %s" % source)

    revlog_debug.debug_delta_find(ui, rl, rev, base_rev=base_rev)
1048 1048
1049 1049
@command(
    b'debugdirstate|debugstate',
    [
        (
            b'',
            b'nodates',
            None,
            _(b'do not display the saved mtime (DEPRECATED)'),
        ),
        (b'', b'dates', True, _(b'display the saved mtime')),
        (b'', b'datesort', None, _(b'sort by saved mtime')),
        (
            b'',
            b'docket',
            False,
            _(b'display the docket (metadata file) instead'),
        ),
        (
            b'',
            b'all',
            False,
            _(b'display dirstate-v2 tree nodes that would not exist in v1'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    if opts.get("docket"):
        # the docket (metadata file) only exists for dirstate-v2
        if not repo.dirstate._use_dirstate_v2:
            raise error.Abort(_(b'dirstate v1 does not have a docket'))

        docket = repo.dirstate._map.docket
        # unpack the fixed-layout tree metadata record stored in the docket
        (
            start_offset,
            root_nodes,
            nodes_with_entry,
            nodes_with_copy,
            unused_bytes,
            _unused,
            ignore_pattern,
        ) = dirstateutils.v2.TREE_METADATA.unpack(docket.tree_metadata)

        ui.write(_(b"size of dirstate data: %d\n") % docket.data_size)
        ui.write(_(b"data file uuid: %s\n") % docket.uuid)
        ui.write(_(b"start offset of root nodes: %d\n") % start_offset)
        ui.write(_(b"number of root nodes: %d\n") % root_nodes)
        ui.write(_(b"nodes with entries: %d\n") % nodes_with_entry)
        ui.write(_(b"nodes with copies: %d\n") % nodes_with_copy)
        ui.write(_(b"number of unused bytes: %d\n") % unused_bytes)
        ui.write(
            _(b"ignore pattern hash: %s\n") % binascii.hexlify(ignore_pattern)
        )
        return

    # the deprecated --nodates flag, when given, overrides --dates
    nodates = not opts['dates']
    if opts.get('nodates') is not None:
        nodates = True
    datesort = opts.get('datesort')

    if datesort:

        def keyfunc(entry):
            # sort by (mtime, filename) so equal mtimes order deterministically
            filename, _state, _mode, _size, mtime = entry
            return (mtime, filename)

    else:
        keyfunc = None  # sort by filename
    entries = list(repo.dirstate._map.debug_iter(all=opts['all']))
    entries.sort(key=keyfunc)
    for entry in entries:
        filename, state, mode, size, mtime = entry
        if mtime == -1:
            timestr = b'unset '
        elif nodates:
            timestr = b'set '
        else:
            timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime))
            timestr = encoding.strtolocal(timestr)
        if mode & 0o20000:
            # symlink: report b'lnk' instead of permission bits
            mode = b'lnk'
        else:
            # mask with the process umask like the dirstate itself does
            mode = b'%3o' % (mode & 0o777 & ~util.umask)
        ui.write(b"%c %s %10d %s%s\n" % (state, mode, size, timestr, filename))
    # finally, report any recorded copy sources
    for f in repo.dirstate.copies():
        ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
1137 1137
1138 1138
@command(
    b'debugdirstateignorepatternshash',
    [],
    _(b''),
)
def debugdirstateignorepatternshash(ui, repo, **opts):
    """show the hash of ignore patterns stored in dirstate if v2,
    or nothing for dirstate-v1
    """
    if repo.dirstate._use_dirstate_v2:
        docket = repo.dirstate._map.docket
        # the ignore-pattern hash is the trailing SHA-1 of the tree metadata
        hash_len = 20  # 160 bits for SHA-1
        hash_bytes = docket.tree_metadata[-hash_len:]
        ui.write(binascii.hexlify(hash_bytes) + b'\n')
1153 1153
1154 1154
@command(
    b'debugdiscovery',
    [
        (b'', b'old', None, _(b'use old-style discovery')),
        (
            b'',
            b'nonheads',
            None,
            _(b'use old-style discovery with non-heads included'),
        ),
        (b'', b'rev', [], b'restrict discovery to this set of revs'),
        (b'', b'seed', b'12323', b'specify the random seed used for discovery'),
        (
            b'',
            b'local-as-revs',
            b"",
            b'treat local as having these revisions only',
        ),
        (
            b'',
            b'remote-as-revs',
            b"",
            b'use local as remote, with only these revisions',
        ),
    ]
    + cmdutil.remoteopts
    + cmdutil.formatteropts,
    _(b'[--rev REV] [OTHER]'),
)
def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
    """runs the changeset discovery protocol in isolation

    The local peer can be "replaced" by a subset of the local repository by
    using the `--local-as-revs` flag. In the same way, the usual `remote` peer
    can be "replaced" by a subset of the local repository using the
    `--remote-as-revs` flag. This is useful to efficiently debug pathological
    discovery situations.

    The following developer oriented config are relevant for people playing with this command:

    * devel.discovery.exchange-heads=True

      If False, the discovery will not start with
      remote head fetching and local head querying.

    * devel.discovery.grow-sample=True

      If False, the sample size used in set discovery will not be increased
      through the process

    * devel.discovery.grow-sample.dynamic=True

      When discovery.grow-sample.dynamic is True, the default, the sample size is
      adapted to the shape of the undecided set (it is set to the max of:
      <target-size>, len(roots(undecided)), len(heads(undecided))

    * devel.discovery.grow-sample.rate=1.05

      the rate at which the sample grow

    * devel.discovery.randomize=True

      If False, random sampling during discovery is deterministic. It is meant
      for integration tests.

    * devel.discovery.sample-size=200

      Control the initial size of the discovery sample

    * devel.discovery.sample-size.initial=100

      Control the initial size of the discovery for initial change
    """
    opts = pycompat.byteskwargs(opts)
    unfi = repo.unfiltered()

    # setup potential extra filtering
    local_revs = opts[b"local_as_revs"]
    remote_revs = opts[b"remote_as_revs"]

    # make sure tests are repeatable
    random.seed(int(opts[b'seed']))

    if not remote_revs:
        # talk to a real peer at the given url
        path = urlutil.get_unique_pull_path_obj(
            b'debugdiscovery', ui, remoteurl
        )
        branches = (path.branch, [])
        remote = hg.peer(repo, opts, path)
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
    else:
        # use a filtered view of the local repository as the "remote" side
        branches = (None, [])
        remote_filtered_revs = logcmdutil.revrange(
            unfi, [b"not (::(%s))" % remote_revs]
        )
        remote_filtered_revs = frozenset(remote_filtered_revs)

        def remote_func(x):
            return remote_filtered_revs

        repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func

        remote = repo.peer()
        remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')

    if local_revs:
        # restrict the local side to the requested subset of revisions
        local_filtered_revs = logcmdutil.revrange(
            unfi, [b"not (::(%s))" % local_revs]
        )
        local_filtered_revs = frozenset(local_filtered_revs)

        def local_func(x):
            return local_filtered_revs

        repoview.filtertable[b'debug-discovery-local-filter'] = local_func
        repo = repo.filtered(b'debug-discovery-local-filter')

    data = {}
    if opts.get(b'old'):

        def doit(pushedrevs, remoteheads, remote=remote):
            if not util.safehasattr(remote, b'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            if remote_revs:
                r = remote._repo.filtered(b'debug-discovery-remote-filter')
                remote._repo = r
            common, _in, hds = treediscovery.findcommonincoming(
                repo, remote, force=True, audit=data
            )
            common = set(common)
            if not opts.get(b'nonheads'):
                ui.writenoi18n(
                    b"unpruned common: %s\n"
                    % b" ".join(sorted(short(n) for n in common))
                )

            clnode = repo.changelog.node
            common = repo.revs(b'heads(::%ln)', common)
            common = {clnode(r) for r in common}
            return common, hds

    else:

        def doit(pushedrevs, remoteheads, remote=remote):
            nodes = None
            if pushedrevs:
                revs = logcmdutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            # `_any` avoids shadowing the `any` builtin
            common, _any, hds = setdiscovery.findcommonheads(
                ui,
                repo,
                remote,
                ancestorsof=nodes,
                audit=data,
                abortwhenunrelated=False,
            )
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts[b'rev']

    fm = ui.formatter(b'debugdiscovery', opts)
    if fm.strict_format:

        @contextlib.contextmanager
        def may_capture_output():
            ui.pushbuffer()
            yield
            data[b'output'] = ui.popbuffer()

    else:
        may_capture_output = util.nullcontextmanager
    with may_capture_output():
        with util.timedcm('debug-discovery') as t:
            common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    if len(common) == 1 and repo.nullid in common:
        common = set()
    heads_common = set(common)
    heads_remote = set(hds)
    heads_local = set(repo.heads())
    # note: they cannot be a local or remote head that is in common and not
    # itself a head of common.
    heads_common_local = heads_common & heads_local
    heads_common_remote = heads_common & heads_remote
    heads_common_both = heads_common & heads_remote & heads_local

    # `all_revs` avoids shadowing the `all` builtin
    all_revs = repo.revs(b'all()')
    common = repo.revs(b'::%ln', common)
    roots_common = repo.revs(b'roots(::%ld)', common)
    missing = repo.revs(b'not ::%ld', common)
    heads_missing = repo.revs(b'heads(%ld)', missing)
    roots_missing = repo.revs(b'roots(%ld)', missing)
    assert len(common) + len(missing) == len(all_revs)

    initial_undecided = repo.revs(
        b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
    )
    heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
    roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
    common_initial_undecided = initial_undecided & common
    missing_initial_undecided = initial_undecided & missing

    data[b'elapsed'] = t.elapsed
    data[b'nb-common-heads'] = len(heads_common)
    data[b'nb-common-heads-local'] = len(heads_common_local)
    data[b'nb-common-heads-remote'] = len(heads_common_remote)
    data[b'nb-common-heads-both'] = len(heads_common_both)
    data[b'nb-common-roots'] = len(roots_common)
    data[b'nb-head-local'] = len(heads_local)
    data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
    data[b'nb-head-remote'] = len(heads_remote)
    data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
        heads_common_remote
    )
    data[b'nb-revs'] = len(all_revs)
    data[b'nb-revs-common'] = len(common)
    data[b'nb-revs-missing'] = len(missing)
    data[b'nb-missing-heads'] = len(heads_missing)
    data[b'nb-missing-roots'] = len(roots_missing)
    data[b'nb-ini_und'] = len(initial_undecided)
    data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
    data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
    data[b'nb-ini_und-common'] = len(common_initial_undecided)
    data[b'nb-ini_und-missing'] = len(missing_initial_undecided)

    fm.startitem()
    fm.data(**pycompat.strkwargs(data))
    # display discovery summary
    fm.plain(b"elapsed time: %(elapsed)f seconds\n" % data)
    fm.plain(b"round-trips: %(total-roundtrips)9d\n" % data)
    if b'total-round-trips-heads' in data:
        fm.plain(
            b" round-trips-heads: %(total-round-trips-heads)9d\n" % data
        )
    if b'total-round-trips-branches' in data:
        fm.plain(
            b" round-trips-branches: %(total-round-trips-branches)9d\n"
            % data
        )
    if b'total-round-trips-between' in data:
        fm.plain(
            b" round-trips-between: %(total-round-trips-between)9d\n" % data
        )
    fm.plain(b"queries: %(total-queries)9d\n" % data)
    if b'total-queries-branches' in data:
        fm.plain(b" queries-branches: %(total-queries-branches)9d\n" % data)
    if b'total-queries-between' in data:
        fm.plain(b" queries-between: %(total-queries-between)9d\n" % data)
    fm.plain(b"heads summary:\n")
    fm.plain(b" total common heads: %(nb-common-heads)9d\n" % data)
    fm.plain(b" also local heads: %(nb-common-heads-local)9d\n" % data)
    fm.plain(b" also remote heads: %(nb-common-heads-remote)9d\n" % data)
    fm.plain(b" both: %(nb-common-heads-both)9d\n" % data)
    fm.plain(b" local heads: %(nb-head-local)9d\n" % data)
    fm.plain(b" common: %(nb-common-heads-local)9d\n" % data)
    fm.plain(b" missing: %(nb-head-local-missing)9d\n" % data)
    fm.plain(b" remote heads: %(nb-head-remote)9d\n" % data)
    fm.plain(b" common: %(nb-common-heads-remote)9d\n" % data)
    fm.plain(b" unknown: %(nb-head-remote-unknown)9d\n" % data)
    fm.plain(b"local changesets: %(nb-revs)9d\n" % data)
    fm.plain(b" common: %(nb-revs-common)9d\n" % data)
    fm.plain(b" heads: %(nb-common-heads)9d\n" % data)
    fm.plain(b" roots: %(nb-common-roots)9d\n" % data)
    fm.plain(b" missing: %(nb-revs-missing)9d\n" % data)
    fm.plain(b" heads: %(nb-missing-heads)9d\n" % data)
    fm.plain(b" roots: %(nb-missing-roots)9d\n" % data)
    fm.plain(b" first undecided set: %(nb-ini_und)9d\n" % data)
    fm.plain(b" heads: %(nb-ini_und-heads)9d\n" % data)
    fm.plain(b" roots: %(nb-ini_und-roots)9d\n" % data)
    fm.plain(b" common: %(nb-ini_und-common)9d\n" % data)
    fm.plain(b" missing: %(nb-ini_und-missing)9d\n" % data)

    if ui.verbose:
        fm.plain(
            b"common heads: %s\n"
            % b" ".join(sorted(short(n) for n in heads_common))
        )
    fm.end()
1436 1436
1437 1437
# buffer size (4 KiB) used when streaming data in debugdownload below
_chunksize = 4 << 10
1439 1439
1440 1440
@command(
    b'debugdownload',
    [
        (b'o', b'output', b'', _(b'path')),
    ],
    optionalrepo=True,
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config"""
    fh = urlmod.open(ui, url, output)

    # stream to the requested file, or to the ui when no --output is given
    dest = ui
    if output:
        dest = open(output, b"wb", _chunksize)
    try:
        # copy in fixed-size chunks to bound memory use
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        # always release the download handle (previously leaked), then the
        # output file when one was opened
        try:
            fh.close()
        finally:
            if output:
                dest.close()
1463 1463
1464 1464
@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = None

        # locate where the extension was loaded from; oxidized builds have
        # no __file__, so fall back to the executable path
        if util.safehasattr(extmod, '__file__'):
            extsource = pycompat.fsencode(extmod.__file__)
        elif getattr(sys, 'oxidized', False):
            extsource = pycompat.sysexecutable
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', b'').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        # in quiet/verbose mode print the bare name; otherwise annotate the
        # name with its tested-with status
        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', extname)
        else:
            fm.write(b'name', b'%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain(b'\n')
            elif not exttestedwith:
                fm.plain(_(b' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(b' (%s!)\n' % lasttestedversion)

        fm.condwrite(
            ui.verbose and extsource,
            b'source',
            _(b' location: %s\n'),
            extsource or b"",
        )

        if ui.verbose:
            fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(
            ui.verbose and exttestedwith,
            b'testedwith',
            _(b' tested with: %s\n'),
            fm.formatlist(exttestedwith, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and extbuglink,
            b'buglink',
            _(b' bug reporting: %s\n'),
            extbuglink or b"",
        )

    fm.end()
1526 1526
1527 1527
@command(
    b'debugfileset',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'apply the filespec on this revision'),
            _(b'REV'),
        ),
        (
            b'',
            b'all-files',
            False,
            _(b'test files from all revisions and working directory'),
        ),
        (
            b's',
            b'show-matcher',
            None,
            _(b'print internal representation of matcher'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
    ],
    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset

    fileset.symbols  # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'), None)

    # transformation stages applied to the parsed tree, in order
    stages = [
        (b'parsed', pycompat.identity),
        (b'analyzed', filesetlang.analyze),
        (b'optimized', filesetlang.optimize),
    ]
    stagenames = {n for n, f in stages}

    # figure out which stages should be displayed
    showalways = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    # run the expression through each stage, printing the requested ones
    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(filesetlang.prettyformat(tree), b"\n")

    # collect the candidate file names the matcher will be tested against
    files = set()
    if opts[b'all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts[b'all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(
            repo.dirstate.walk(
                scmutil.matchall(repo),
                subrepos=list(wctx.substate),
                unknown=True,
                ignored=True,
            )
        )
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(repo.getcwd(), expr)
    if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write(b"%s\n" % f)
1623 1623
1624 1624
@command(
    b"debug-repair-issue6528",
    [
        (
            b'',
            b'to-report',
            b'',
            _(b'build a report of affected revisions to this file'),
            _(b'FILE'),
        ),
        (
            b'',
            b'from-report',
            b'',
            _(b'repair revisions listed in this report file'),
            _(b'FILE'),
        ),
        (
            b'',
            b'paranoid',
            False,
            _(b'check that both detection methods do the same thing'),
        ),
    ]
    + cmdutil.dryrunopts,
)
def debug_repair_issue6528(ui, repo, **opts):
    """find affected revisions and repair them. See issue6528 for more details.

    The `--to-report` and `--from-report` flags allow you to cache and reuse the
    computation of affected revisions for a given repository across clones.
    The report format is line-based (with empty lines ignored):

    ```
    <ascii-hex of the affected revision>,... <unencoded filelog index filename>
    ```

    There can be multiple broken revisions per filelog, they are separated by
    a comma with no spaces. The only space is between the revision(s) and the
    filename.

    Note that this does *not* mean that this repairs future affected revisions,
    that needs a separate fix at the exchange level that was introduced in
    Mercurial 5.9.1.

    There is a `--paranoid` flag to test that the fast implementation is correct
    by checking it against the slow implementation. Since this matter is quite
    urgent and testing every edge-case is probably quite costly, we use this
    method to test on large repositories as a fuzzing method of sorts.
    """
    # building a report is exclusive with consuming one or doing a dry-run
    cmdutil.check_incompatible_arguments(
        opts, 'to_report', ['from_report', 'dry_run']
    )

    # the corruption addressed here only ever affected revlogv1 stores
    if requirements.REVLOGV1_REQUIREMENT not in repo.requirements:
        msg = b"can only repair revlogv1 repositories, v2 is not affected"
        raise error.Abort(_(msg))

    # TODO maybe add filelog pattern and revision pattern parameters to help
    # narrow down the search for users that know what they're looking for?
    rewrite.repair_issue6528(
        ui,
        repo,
        dry_run=opts.get('dry_run'),
        to_report=opts.get('to_report'),
        from_report=opts.get('from_report'),
        paranoid=opts.get('paranoid'),
    )
1697 1697
1698 1698
@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    # width of the name column: widest variant name, at least the header width
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len(b'format-variant'), maxvariantlength)

    def makeformatname(name):
        # pad each name so the value columns line up
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))

    fm = ui.formatter(b'debugformat', opts)
    if fm.isplain():

        def formatvalue(value):
            # bytes values pass through; booleans render as yes/no
            if util.safehasattr(value, b'startswith'):
                return value
            if value:
                return b'yes'
            else:
                return b'no'

    else:
        formatvalue = pycompat.identity

    # header row
    fm.plain(b'format-variant')
    fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
    fm.plain(b' repo')
    if ui.verbose:
        fm.plain(b' config default')
    fm.plain(b'\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # pick labels that highlight repo/config/default mismatches
        if repovalue != configvalue:
            namelabel = b'formatvariant.name.mismatchconfig'
            repolabel = b'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = b'formatvariant.name.mismatchdefault'
            repolabel = b'formatvariant.repo.mismatchdefault'
        else:
            namelabel = b'formatvariant.name.uptodate'
            repolabel = b'formatvariant.repo.uptodate'

        fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
        fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
        if fv.default != configvalue:
            configlabel = b'formatvariant.config.special'
        else:
            configlabel = b'formatvariant.config.default'
        fm.condwrite(
            ui.verbose,
            b'config',
            b' %6s',
            formatvalue(configvalue),
            label=configlabel,
        )
        fm.condwrite(
            ui.verbose,
            b'default',
            b' %7s',
            formatvalue(fv.default),
            label=b'formatvariant.default',
        )
        fm.plain(b'\n')
    fm.end()
1769 1769
1770 1770
@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""

    def yesno(probe):
        # render a boolean probe result for display
        return b'yes' if probe else b'no'

    ui.writenoi18n(b'path: %s\n' % path)
    mountpoint = util.getfsmountpoint(path) or b'(unknown)'
    ui.writenoi18n(b'mounted on: %s\n' % mountpoint)
    ui.writenoi18n(b'exec: %s\n' % yesno(util.checkexec(path)))
    ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
    ui.writenoi18n(b'symlink: %s\n' % yesno(util.checklink(path)))
    ui.writenoi18n(b'hardlink: %s\n' % yesno(util.checknlink(path)))
    # probing case sensitivity needs a scratch file; the directory may not
    # be writable, in which case we report it as unknown
    casesensitive = b'(unknown)'
    try:
        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
            casesensitive = yesno(util.fscasesensitive(f.name))
    except OSError:
        pass
    ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1793 1793
1794 1794
@command(
    b'debuggetbundle',
    [
        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
        (
            b't',
            b'type',
            b'bzip2',
            _(b'bundle compression type to use'),
            _(b'TYPE'),
        ),
    ],
    _(b'REPO FILE [-H|-C ID]...'),
    norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable(b'getbundle'):
        raise error.Abort(b"getbundle() not supported by target repository")
    # build the getbundle() arguments from the hex node ids on the command line
    args = {}
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args['bundlecaps'] = None
    bundle = peer.getbundle(b'debug', **args)

    # translate the user-facing compression name into an on-disk bundle type
    btypes = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }
    bundletype = btypes.get(opts.get(b'type', b'bzip2').lower())
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_(b'unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1841 1841
1842 1842
@command(b'debugignore', [], b'[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write(b"%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != b'.':
                # test the path itself first, then each containing directory,
                # so we can report *why* the file is ignored
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    for p in pathutil.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_(b"%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(
                        _(
                            b"%s is ignored because of "
                            b"containing directory %s\n"
                        )
                        % (uipathfn(f), ignored)
                    )
                # point at the exact hgignore rule that matched
                ignorefile, lineno, line = ignoredata
                ui.write(
                    _(b"(ignore rule in %s, line %d: '%s')\n")
                    % (ignorefile, lineno, line)
                )
            else:
                ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1891 1891
1892 1892
@command(
    b'debug-revlog-index|debugindex',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a revlog"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindex', file_, opts)
    fm = ui.formatter(b'debugindex', opts)

    # Some stores (e.g. filelogs) wrap the actual revlog in a `_revlog`
    # attribute; fall back to the store itself otherwise.  `rl` avoids
    # shadowing the `revlog` module imported at the top of the file.
    rl = getattr(store, b'_revlog', store)

    return revlog_debug.debug_index(
        ui,
        repo,
        formatter=fm,
        revlog=rl,
        full_node=ui.debugflag,
    )
1914 1914
1915 1915
@command(
    b'debugindexdot',
    cmdutil.debugrevlogopts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
    ui.writenoi18n(b"digraph G {\n")
    for rev in store:
        node = store.node(rev)
        p1, p2 = store.parents(node)
        # There is always a first-parent edge; a second-parent edge only
        # exists for merges (non-merges have the null node as p2).
        ui.write(b"\t%d -> %d\n" % (store.rev(p1), rev))
        if p2 != repo.nullid:
            ui.write(b"\t%d -> %d\n" % (store.rev(p2), rev))
    ui.write(b"}\n")
1934 1934
1935 1935
@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # Exercise the index once so lazily-computed stats are populated.
    repo.changelog.shortest(repo.nullid, 1)
    index = repo.changelog.index
    if not util.safehasattr(index, b'stats'):
        raise error.Abort(_(b'debugindexstats only works with native code'))
    stats = index.stats()
    for key in sorted(stats):
        ui.write(b'%s: %d\n' % (key, stats[key]))
1945 1945
1946 1946
@command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
def debuginstall(ui, **opts):
    """test Mercurial installation

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)

    # Count of hard failures; each failed check increments it and the
    # total is both printed and returned.
    problems = 0

    fm = ui.formatter(b'debuginstall', opts)
    fm.startitem()

    # encoding might be unknown or wrong. don't translate these messages.
    fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(
        err,
        b'encodingerror',
        b" %s\n (check that your locale is properly set)\n",
        err,
    )

    # Python
    # In an oxidized (PyOxidizer) build there is no os.__file__, so fall
    # back to reporting the executable path instead of the lib directory.
    pythonlib = None
    if util.safehasattr(os, '__file__'):
        pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
    elif getattr(sys, 'oxidized', False):
        pythonlib = pycompat.sysexecutable

    fm.write(
        b'pythonexe',
        _(b"checking Python executable (%s)\n"),
        pycompat.sysexecutable or _(b"unknown"),
    )
    fm.write(
        b'pythonimplementation',
        _(b"checking Python implementation (%s)\n"),
        pycompat.sysbytes(platform.python_implementation()),
    )
    fm.write(
        b'pythonver',
        _(b"checking Python version (%s)\n"),
        (b"%d.%d.%d" % sys.version_info[:3]),
    )
    fm.write(
        b'pythonlib',
        _(b"checking Python lib (%s)...\n"),
        pythonlib or _(b"unknown"),
    )

    try:
        from . import rustext  # pytype: disable=import-error

        rustext.__doc__  # trigger lazy import
    except ImportError:
        rustext = None

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add(b'sni')

    fm.write(
        b'pythonsecurity',
        _(b"checking Python security support (%s)\n"),
        fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
    )

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if b'tls1.2' not in security:
        fm.plain(
            _(
                b' TLS 1.2 not supported by Python install; '
                b'network connections lack modern security\n'
            )
        )
    if b'sni' not in security:
        fm.plain(
            _(
                b' SNI not supported by Python install; may have '
                b'connectivity issues with some servers\n'
            )
        )

    fm.plain(
        _(
            b"checking Rust extensions (%s)\n"
            % (b'missing' if rustext is None else b'installed')
        ),
    )

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write(
        b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
    )
    fm.write(
        b'hgverextra',
        _(b"checking Mercurial custom build (%s)\n"),
        b'+'.join(hgver.split(b'+')[1:]),
    )

    # compiled modules
    # Same oxidized-build fallback as for pythonlib above.
    hgmodules = None
    if util.safehasattr(sys.modules[__name__], '__file__'):
        hgmodules = os.path.dirname(pycompat.fsencode(__file__))
    elif getattr(sys, 'oxidized', False):
        hgmodules = pycompat.sysexecutable

    fm.write(
        b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
    )
    fm.write(
        b'hgmodules',
        _(b"checking installed modules (%s)...\n"),
        hgmodules or _(b"unknown"),
    )

    # Only try importing the native extensions when the module policy says
    # they should be present; a pure-Python install has nothing to check.
    rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
    rustext = rustandc  # for now, that's the only case
    cext = policy.policy in (b'c', b'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        err = None
        try:
            if cext:
                from .cext import (  # pytype: disable=import-error
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )

                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (  # pytype: disable=import-error
                    ancestor,
                    dirstate,
                )

                dir(ancestor), dir(dirstate)  # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, b'extensionserror', b" %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write(
        b'compengines',
        _(b'checking registered compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    fm.write(
        b'compenginesavail',
        _(b'checking available compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines if e.available()),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE
    )
    fm.write(
        b'compenginesserver',
        _(
            b'checking available compression engines '
            b'for wire protocol (%s)\n'
        ),
        fm.formatlist(
            [e.name() for e in wirecompengines if e.wireprotosupport()],
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    re2 = b'missing'
    if util._re2:
        re2 = b'available'
    fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    # `p` doubles as the success flag for the template checks: it is
    # cleared to None whenever the default template is missing or broken.
    p = templater.templatedir()
    fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
    fm.condwrite(not p, b'', _(b" no template directories found\n"))
    if p:
        (m, fp) = templater.try_open_template(b"map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
        else:
            p = None
        fm.condwrite(
            p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
        )
        fm.condwrite(
            not m,
            b'defaulttemplatenotfound',
            _(b" template '%s' not found\n"),
            b"default",
        )
    if not p:
        problems += 1
    fm.condwrite(
        not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
    )

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    # A missing 'vi' (the built-in default) is reported with a dedicated
    # message since it means no editor was ever configured.
    fm.condwrite(
        not cmdpath and editor == b'vi',
        b'vinotfound',
        _(
            b" No commit editor set and can't find %s in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editor == b'vi' and editorbin,
    )
    fm.condwrite(
        not cmdpath and editor != b'vi',
        b'editornotfound',
        _(
            b" Can't find editor '%s' in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editorbin,
    )
    if not cmdpath and editor != b'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e.message
        problems += 1

    fm.condwrite(
        username, b'username', _(b"checking username (%s)\n"), username
    )
    fm.condwrite(
        err,
        b'usernameerror',
        _(
            b"checking username...\n %s\n"
            b" (specify a username in your configuration file)\n"
        ),
        err,
    )

    # Extensions may contribute their own checks via a module-level
    # `debuginstall(ui, fm)` hook returning a problem count.
    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, b'', _(b"no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(
        problems,
        b'problems',
        _(b"%d problems detected, please check your install!\n"),
        problems,
    )
    fm.end()

    return problems
2245 2245
2246 2246
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    flags = peer.known([bin(s) for s in ids])
    # One character per queried node: b"1" if known, b"0" otherwise.
    output = b"".join(b"1" if known else b"0" for known in flags)
    ui.write(b"%s\n" % output)
2260 2260
2261 2261
@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # Pure alias: all completion logic lives in debugnamecomplete.
    debugnamecomplete(ui, repo, *args)
2266 2266
2267 2267
@command(
    b'debuglocks',
    [
        (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-free-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # --force-free-* simply deletes the lock files and exits; tryunlink
    # ignores a missing file, so freeing an unheld lock is a no-op.
    if opts.get('force_free_lock'):
        repo.svfs.tryunlink(b'lock')
    if opts.get('force_free_wlock'):
        repo.vfs.tryunlink(b'wlock')
    if opts.get('force_free_lock') or opts.get('force_free_wlock'):
        return 0

    locks = []
    try:
        # NB: wlock must be acquired before lock, matching the locking
        # order used everywhere else in Mercurial.
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            try:
                if ui.interactive():
                    prompt = _(b"ready to release the lock (y)? $$ &Yes")
                    ui.promptchoice(prompt)
                else:
                    # Non-interactive: hold the locks until we receive a
                    # signal (e.g. SIGINT), which surfaces here as
                    # KeyboardInterrupt.
                    msg = b"%d locks held, waiting for signal\n"
                    msg %= len(locks)
                    ui.status(msg)
                    while True:  # XXX wait for a signal
                        time.sleep(0.1)
            except KeyboardInterrupt:
                msg = b"signal-received releasing locks\n"
                ui.status(msg)
            return 0
    finally:
        release(*locks)

    # No set/free option given: report current lock state.
    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # We could take the lock ourselves, so nobody else holds it.
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if b":" in locker:
                    host, pid = locker.split(b':')
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except FileNotFoundError:
                # Lock file vanished between the attempt and the stat:
                # treat it as free.
                pass

        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    return held
2390 2390
2391 2391
@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        # Abort cleanly when the storage backend has no fulltext cache
        # (e.g. non-revlog manifest implementations).
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _(
                b"Current revlog implementation doesn't appear to have a "
                b"manifest fulltext cache\n"
            )
            raise error.Abort(msg)

    if opts.get('clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
        return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(
                        bytes(e), hint=b"Check your manifest node id"
                    )
                manifest.read()  # stores revision in cache too
        return

    # No option given: display the cache contents.
    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )
2465 2465
2466 2466
@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
def debugmergestate(ui, repo, *args, **opts):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    if ui.verbose:
        ms = mergestatemod.mergestate(repo)

        # sort so that reasonable information is on top
        v1records = ms._readrecordsv1()
        v2records = ms._readrecordsv2()

        if not v1records and not v2records:
            pass
        elif not v2records:
            ui.writenoi18n(b'no version 2 merge state\n')
        elif ms._v1v2match(v1records, v2records):
            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
        else:
            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')

    opts = pycompat.byteskwargs(opts)
    if not opts[b'template']:
        # Default human-readable rendering; users may override with -T.
        opts[b'template'] = (
            b'{if(commits, "", "no merge state found\n")}'
            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
            b'{files % "file: {path} (state \\"{state}\\")\n'
            b'{if(local_path, "'
            b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
            b' ancestor path: {ancestor_path} (node {ancestor_node})\n'
            b' other path: {other_path} (node {other_node})\n'
            b'")}'
            b'{if(rename_side, "'
            b' rename side: {rename_side}\n'
            b' renamed path: {renamed_path}\n'
            b'")}'
            b'{extras % " extra: {key} = {value}\n"}'
            b'"}'
            b'{extras % "extra: {file} ({key} = {value})\n"}'
        )

    ms = mergestatemod.mergestate.read(repo)

    fm = ui.formatter(b'debugmergestate', opts)
    fm.startitem()

    # The two merge parents ("local" and "other") with optional labels.
    fm_commits = fm.nested(b'commits')
    if ms.active():
        for name, node, label_index in (
            (b'local', ms.local, 0),
            (b'other', ms.other, 1),
        ):
            fm_commits.startitem()
            fm_commits.data(name=name)
            fm_commits.data(node=hex(node))
            if ms._labels and len(ms._labels) > label_index:
                fm_commits.data(label=ms._labels[label_index])
    fm_commits.end()

    # Per-file merge records; the tuple layout of ms._state[f] depends on
    # the record type stored in state[0].
    fm_files = fm.nested(b'files')
    if ms.active():
        for f in ms:
            fm_files.startitem()
            fm_files.data(path=f)
            state = ms._state[f]
            fm_files.data(state=state[0])
            if state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED,
                mergestatemod.MERGE_RECORD_RESOLVED,
            ):
                fm_files.data(local_key=state[1])
                fm_files.data(local_path=state[2])
                fm_files.data(ancestor_path=state[3])
                fm_files.data(ancestor_node=state[4])
                fm_files.data(other_path=state[5])
                fm_files.data(other_node=state[6])
                fm_files.data(local_flags=state[7])
            elif state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
                mergestatemod.MERGE_RECORD_RESOLVED_PATH,
            ):
                fm_files.data(renamed_path=state[1])
                fm_files.data(rename_side=state[2])
            fm_extras = fm_files.nested(b'extras')
            for k, v in sorted(ms.extras(f).items()):
                fm_extras.startitem()
                fm_extras.data(key=k)
                fm_extras.data(value=v)
            fm_extras.end()

    fm_files.end()

    # Extras attached to files that have no merge record of their own.
    fm_extras = fm.nested(b'extras')
    for f, d in sorted(ms.allextras().items()):
        if f in ms:
            # If file is in mergestate, we have already processed its extras
            continue
        for k, v in d.items():
            fm_extras.startitem()
            fm_extras.data(file=f)
            fm_extras.data(key=k)
            fm_extras.data(value=v)
    fm_extras.end()

    fm.end()
2574 2574
2575 2575
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # Branch names get special treatment below: historically only open
    # branches were listed, so the generic namespace listing skips them.
    for namespace, ns in repo.names.items():
        if namespace != b'branches':
            names.update(ns.listnames(repo))
    for tag, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            names.add(tag)
    # No arguments means "complete everything" (empty prefix).
    prefixes = args if args else [b'']
    completions = set()
    for prefix in prefixes:
        completions.update(n for n in names if n.startswith(prefix))
    ui.write(b'\n'.join(sorted(completions)))
    ui.write(b'\n')
2598 2598
2599 2599
@command(
    b'debugnodemap',
    [
        (
            b'',
            b'dump-new',
            False,
            _(b'write a (new) persistent binary nodemap on stdout'),
        ),
        (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
        (
            b'',
            b'check',
            False,
            _(b'check that the data on disk data are correct.'),
        ),
        (
            b'',
            b'metadata',
            False,
            _(b'display the on disk meta data for the nodemap'),
        ),
    ],
)
def debugnodemap(ui, repo, **opts):
    """write and inspect on disk nodemap"""
    # The options are handled as mutually exclusive: the first one set
    # (in the order below) wins and the others are ignored.
    if opts['dump_new']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        if util.safehasattr(cl.index, "nodemap_data_all"):
            data = cl.index.nodemap_data_all()
        else:
            data = nodemap.persistent_data(cl.index)
        ui.write(data)
    elif opts['dump_disk']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write(data[:])
    elif opts['check']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            return nodemap.check_data(ui, cl.index, data)
    elif opts['metadata']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write((b"uid: %s\n") % docket.uid)
            ui.write((b"tip-rev: %d\n") % docket.tip_rev)
            ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
            ui.write((b"data-length: %d\n") % docket.data_length)
            ui.write((b"data-unused: %d\n") % docket.data_unused)
            # Guard against a zero-length data file: the percentage is
            # meaningless then and the bare division would otherwise
            # abort with a ZeroDivisionError traceback.
            if docket.data_length:
                unused_perc = docket.data_unused * 100.0 / docket.data_length
            else:
                unused_perc = 0.0
            ui.write((b"data-unused: %2.3f%%\n") % unused_perc)
2661 2661
2662 2662
@command(
    b'debugobsolete',
    [
        (b'', b'flags', 0, _(b'markers flag')),
        (
            b'',
            b'record-parents',
            False,
            _(b'record parent information for the precursor'),
        ),
        (b'r', b'rev', [], _(b'display markers relevant to REV')),
        (
            b'',
            b'exclusive',
            False,
            _(b'restrict display to markers only relevant to REV'),
        ),
        (b'', b'index', False, _(b'display index of the marker')),
        (b'', b'delete', [], _(b'delete markers specified by indices')),
    ]
    + cmdutil.commitopts2
    + cmdutil.formatteropts,
    _(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        # Parse a full hex node id into binary, or raise InputError.
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != repo.nodeconstants.nodelen:
                raise ValueError
            return n
        except ValueError:
            raise error.InputError(
                b'changeset references must be full hexadecimal '
                b'node identifiers'
            )

    # Mode 1: --delete removes markers by obsstore index.
    if opts.get(b'delete'):
        indices = []
        for v in opts.get(b'delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.InputError(
                    _(b'invalid index value: %r') % v,
                    hint=_(b'use integers for indices'),
                )

        if repo.currenttransaction():
            raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of transaction.')
            )

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_(b'deleted %i obsolescence markers\n') % n)

        return

    # Mode 2: a precursor argument creates a new marker.
    if precursor is not None:
        if opts[b'rev']:
            raise error.InputError(
                b'cannot select revision when creating marker'
            )
        metadata = {}
        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        # Lock, then open a transaction; the nested try/finally blocks
        # guarantee tr.release() runs before l.release() on any path.
        l = repo.lock()
        try:
            tr = repo.transaction(b'debugobsolete')
            try:
                date = opts.get(b'date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts[b'record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort(
                            b'cannot used --record-parents on '
                            b'unknown changesets'
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts[b'flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % stringutil.forcebytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # Mode 3: no precursor, display markers (optionally filtered by
        # --rev/--exclusive).
        if opts[b'rev']:
            revs = logcmdutil.revrange(repo, opts[b'rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts[b'exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get(b'rev') and opts.get(b'index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get(b'index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
2812 2812
2813 2813
@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    # p1copies() maps destination -> source; print as "source -> dest".
    p1_copies = ctx.p1copies()
    for dest in p1_copies:
        ui.write(b'%s -> %s\n' % (p1_copies[dest], dest))
2826 2826
2827 2827
@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    # p2copies() maps destination -> source; print as "source -> dest".
    p2_copies = ctx.p2copies()
    for dest in p2_copies:
        ui.write(b'%s -> %s\n' % (p2_copies[dest], dest))
2840 2840
2841 2841
@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    """complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used."""

    def complete(path, acceptable):
        # Return (files, dirs) completion candidates for 'path', limited
        # to dirstate entries whose state letter is in 'acceptable'.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            # The spec points outside the repository: nothing to complete.
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        # Make the spec repository-relative, matching dirstate paths.
        spec = spec[len(rootdir) :]
        # The dirstate always stores '/'-separated paths; translate when
        # the OS separator differs (e.g. backslash on Windows).
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        # Bind the bound methods once; this loop runs over every dirstate
        # entry.
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.items():
            if f.startswith(spec) and st.state in acceptable:
                if fixpaths:
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    # Without --full, stop at the next path segment and
                    # offer the containing directory instead.
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # Translate the -n/-a/-r flags into dirstate state letters; an empty
    # string (no flag given) means "accept all" via the b'nmar' fallback
    # passed to complete() below.
    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')
2910 2910
2911 2911
@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    # Resolve both endpoints and restrict the comparison to the matched
    # files (matching is evaluated against the first revision).
    old_ctx = scmutil.revsingle(repo, rev1)
    new_ctx = scmutil.revsingle(repo, rev2)
    matcher = scmutil.match(old_ctx, pats, opts)
    copy_map = copies.pathcopies(old_ctx, new_ctx, matcher)
    for destination, origin in sorted(copy_map.items()):
        ui.write(b'%s -> %s\n' % (origin, destination))
2925 2925
2926 2926
@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Force peer request logging on; the output only appears with --debug.
    overrides = {(b'devel', b'debug.peer-request'): True}
    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

    try:
        # A non-None local() means the peer is backed by a local repository.
        is_local = peer.local() is not None
        pushable = peer.canpush()

        ui.write(_(b'url: %s\n') % peer.url())
        ui.write(_(b'local: %s\n') % (_(b'yes') if is_local else _(b'no')))
        ui.write(
            _(b'pushable: %s\n') % (_(b'yes') if pushable else _(b'no'))
        )
    finally:
        peer.close()
2950 2950
2951 2951
@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    # --tool is emulated by temporarily overriding ui.forcemerge, which is
    # what the real merge machinery consults.
    overrides = {}
    if opts[b'tool']:
        overrides[(b'ui', b'forcemerge')] = opts[b'tool']
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        hgmerge = encoding.environ.get(b"HGMERGE")
        if hgmerge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config(b"ui", b"merge")
        if uimerge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts[b'changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            # Suppress _picktool's pattern-matching chatter unless --debug
            # is in effect (see the docstring above).
            with ui.silent(
                error=True
            ) if not ui.debugflag else util.nullcontextmanager():
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            ui.write(b'%s = %s\n' % (path, tool))
3036 3036
3037 3037
@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    """access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    """

    target = hg.peer(ui, {}, repopath)
    try:
        if not keyinfo:
            # List mode: dump every key/value pair in the namespace.
            for key, value in sorted(target.listkeys(namespace).items()):
                ui.write(
                    b"%s\t%s\n"
                    % (stringutil.escapestr(key), stringutil.escapestr(value))
                )
        else:
            # Update mode: conditionally set KEY from OLD to NEW.
            key, old, new = keyinfo
            with target.commandexecutor() as executor:
                result = executor.callcommand(
                    b'pushkey',
                    {
                        b'namespace': namespace,
                        b'key': key,
                        b'old': old,
                        b'new': new,
                    },
                ).result()

            ui.status(pycompat.bytestr(result) + b'\n')
            # Exit status 0 on success (truthy pushkey result).
            return not result
    finally:
        target.close()
3073 3073
3074 3074
@command(b'debugpvec', [], _(b'A B'))
def debugpvec(ui, repo, a, b=None):
    # Compare the pvecs (parent vectors) of two revisions and print their
    # depths, hamming delta, distance and relation.
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = b"="
    elif pa > pb:
        rel = b">"
    elif pa < pb:
        rel = b"<"
    elif pa | pb:
        rel = b"|"
    else:
        # Defensive fallback: previously 'rel' was only assigned inside the
        # if/elif chain, so a pair matching none of the comparisons crashed
        # with UnboundLocalError at the final ui.write() below.
        rel = b"?"
    ui.write(_(b"a: %s\n") % pa)
    ui.write(_(b"b: %s\n") % pb)
    ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(
        _(b"delta: %d hdist: %d distance: %d relation: %s\n")
        % (
            abs(pa._depth - pb._depth),
            pvec._hamming(pa._vec, pb._vec),
            pa.distance(pb),
            rel,
        )
    )
3101 3101
3102 3102
@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        # Rebuilding inside an open transaction would interleave dirstate
        # writes with other pending changes; refuse outright.
        if repo.currenttransaction() is not None:
            msg = b'rebuild the dirstate outside of a transaction'
            raise error.ProgrammingError(msg)
        dirstate = repo.dirstate
        # None means "rebuild every file"; --minimal narrows this below.
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            # Files in the manifest but unknown to the dirstate.
            manifestonly = manifestfiles - dirstatefiles
            # Dirstate-only files; those not marked "added" are stale.
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = {f for f in dsonly if not dirstate.get_entry(f).added}
            changedfiles = manifestonly | dsnotadded

        with dirstate.changing_parents(repo):
            dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
3154 3154
3155 3155
@command(
    b'debugrebuildfncache',
    [
        (
            b'',
            b'only-data',
            False,
            _(b'only look for wrong .d files (much faster)'),
        )
    ],
    b'',
)
def debugrebuildfncache(ui, repo, **opts):
    """rebuild the fncache file"""
    # --only-data restricts the rebuild scan to revlog data (.d) files.
    byte_opts = pycompat.byteskwargs(opts)
    only_data = byte_opts.get(b"only_data")
    repair.rebuildfncache(ui, repo, only_data)
3172 3172
3173 3173
@command(
    b'debugrename',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    byte_opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, byte_opts.get(b'rev'))
    matcher = scmutil.match(ctx, pats, byte_opts)
    for abspath in ctx.walk(matcher):
        fctx = ctx[abspath]
        # renamed() yields (source path, source filenode) for copies/renames
        # and a false value otherwise.
        renamed = fctx.filelog().renamed(fctx.filenode())
        relpath = repo.pathto(abspath)
        if not renamed:
            ui.write(_(b"%s not renamed\n") % relpath)
        else:
            ui.write(
                _(b"%s renamed from %s:%s\n")
                % (relpath, renamed[0], hex(renamed[1]))
            )
3193 3193
3194 3194
@command(b'debugrequires|debugrequirements', [], b'')
def debugrequirements(ui, repo):
    """print the current repo requirements"""
    # Requirements are printed one per line, sorted for stable output.
    for requirement in sorted(repo.requirements):
        ui.write(b"%s\n" % requirement)
3200 3200
3201 3201
@command(
    b'debugrevlog',
    cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    byte_opts = pycompat.byteskwargs(opts)
    rl = cmdutil.openrevlog(repo, b'debugrevlog', file_, byte_opts)

    # --dump emits the raw index data; otherwise print statistics.
    if byte_opts.get(b"dump"):
        revlog_debug.dump(ui, rl)
    else:
        revlog_debug.debug_revlog(ui, rl)
    return 0
3218 3218
3219 3219
@command(
    b'debugrevlogindex',
    cmdutil.debugrevlogopts
    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
    _(b'[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
    format = opts.get(b'format', 0)
    if format not in (0, 1):
        raise error.Abort(_(b"unknown format %d") % format)

    # --debug prints full node hashes, otherwise the short form is used.
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        # Measure one node id to size the header columns, then stop.
        idlen = len(shortfn(r.node(i)))
        break

    # Emit the column header matching the chosen format and verbosity.
    if format == 0:
        if ui.verbose:
            ui.writenoi18n(
                b" rev offset length linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
        else:
            ui.writenoi18n(
                b" rev linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
    elif format == 1:
        if ui.verbose:
            ui.writenoi18n(
                (
                    b" rev flag offset length size link p1"
                    b" p2 %s\n"
                )
                % b"nodeid".rjust(idlen)
            )
        else:
            ui.writenoi18n(
                b" rev flag size link p1 p2 %s\n"
                % b"nodeid".rjust(idlen)
            )

    # One output line per revision; format 0 reports parents as node
    # hashes, format 1 as revision numbers plus the flag field.
    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # Fall back to null parents if the lookup fails.
                pp = [repo.nullid, repo.nullid]
            if ui.verbose:
                ui.write(
                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
                    % (
                        i,
                        r.start(i),
                        r.length(i),
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
            else:
                ui.write(
                    b"% 6d % 7d %s %s %s\n"
                    % (
                        i,
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write(
                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.start(i),
                        r.length(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
            else:
                ui.write(
                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
3333 3333
3334 3334
@command(
    b'debugrevspec',
    [
        (
            b'',
            b'optimize',
            None,
            _(b'print parsed tree after optimizing (DEPRECATED)'),
        ),
        (
            b'',
            b'show-revs',
            True,
            _(b'print list of result revisions (default)'),
        ),
        (
            b's',
            b'show-set',
            None,
            _(b'print internal representation of result set'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
        (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
        (b'', b'verify-optimized', False, _(b'verify optimized result')),
    ],
    b'REVSPEC',
)
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems(b'revsetalias')
    # The revset compilation pipeline: each stage transforms the tree
    # produced by the previous one, in this order.
    stages = [
        (b'parsed', lambda tree: tree),
        (
            b'expanded',
            lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
        ),
        (b'concatenated', revsetlang.foldconcat),
        (b'analyzed', revsetlang.analyze),
        (b'optimized', revsetlang.optimize),
    ]
    if opts[b'no_optimized']:
        # Drop the final 'optimized' stage.
        stages = stages[:-1]
    if opts[b'verify_optimized'] and opts[b'no_optimized']:
        raise error.Abort(
            _(b'cannot use --verify-optimized with --no-optimized')
        )
    stagenames = {n for n, f in stages}

    # 'showalways' stages are always printed; 'showchanged' stages only
    # when their tree differs from the previously printed one.
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
        showchanged.update([b'expanded', b'concatenated'])
        if opts[b'optimize']:
            showalways.add(b'optimized')
    if opts[b'show_stage'] and opts[b'optimize']:
        raise error.Abort(_(b'cannot use --optimize with --show-stage'))
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    # Run the pipeline, remembering each stage's tree for later use.
    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(revsetlang.prettyformat(tree), b"\n")
            printedtree = tree

    if opts[b'verify_optimized']:
        # Evaluate both the analyzed and the optimized trees, and print a
        # unified-style diff of the revision lists if they disagree.
        arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
        brevs = revset.makematcher(treebystage[b'optimized'])(repo)
        if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
            ui.writenoi18n(
                b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
            )
            ui.writenoi18n(
                b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
            )
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
        ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write(b'-%d\n' % c, label=b'diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write(b'+%d\n' % c, label=b'diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(b' %d\n' % c)
        # Non-zero exit signals that the optimizer changed the result.
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
        ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
    if not opts[b'show_revs']:
        return
    for c in revs:
        ui.write(b"%d\n" % c)
3466 3466
3467 3467
@command(
    b'debugserve',
    [
        (
            b'',
            b'sshstdio',
            False,
            _(b'run an SSH server bound to process handles'),
        ),
        (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
        (b'', b'logiofile', b'', _(b'file to log server I/O to')),
    ],
    b'',
)
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    # Only the stdio-bound SSH server mode is implemented.
    if not opts[b'sshstdio']:
        raise error.Abort(_(b'only --sshstdio is currently supported'))

    logfh = None

    # --logiofd and --logiofile are mutually exclusive log destinations.
    if opts[b'logiofd'] and opts[b'logiofile']:
        raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))

    if opts[b'logiofd']:
        # Ideally we would be line buffered. But line buffering in binary
        # mode isn't supported and emits a warning in Python 3.8+. Disabling
        # buffering could have performance impacts. But since this isn't
        # performance critical code, it should be fine.
        try:
            logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
    elif opts[b'logiofile']:
        logfh = open(opts[b'logiofile'], b'ab', 0)

    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()
3516 3516
3517 3517
@command(b'debugsetparents', [], _(b'REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory (DANGEROUS)

    This command is not what you are looking for and should not be used. Using
    this command will most certainly result in slight corruption of the file
    level histories within your repository. DO NOT USE THIS COMMAND.

    The command updates the p1 and p2 fields in the dirstate, without touching
    anything else. This is useful for writing repository conversion tools, but
    should be used with extreme care. For example, neither the working
    directory nor the dirstate is updated, so file status may be incorrect
    after running this command. Only use it if you are one of the few people
    that deeply understand both conversion tools and file level histories. If
    you are reading this help, you are not one of these people (most of them
    sailed west from Mithlond anyway).

    So one last time DO NOT USE THIS COMMAND.

    Returns 0 on success.
    """

    node1 = scmutil.revsingle(repo, rev1).node()
    # REV2 defaults to the null revision when omitted.
    node2 = scmutil.revsingle(repo, rev2, b'null').node()

    with repo.wlock():
        repo.setparents(node1, node2)
3545 3545
3546 3546
@command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugsidedata(ui, repo, file_, rev=None, **opts):
    """dump the side data for a cl/manifest/file revision

    Use --verbose to dump the sidedata content."""
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        # With -c/-m/--dir the single positional argument is the revision,
        # not a file name; shift it accordingly.
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    # Unwrap filelog-style wrappers down to the underlying revlog.
    r = getattr(r, '_revlog', r)
    try:
        sidedata = r.sidedata(r.lookup(rev))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
    if sidedata:
        sidedata = list(sidedata.items())
        sidedata.sort()
        ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
        for key, value in sidedata:
            ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
            if ui.verbose:
                ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3573 3573
3574 3574
@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    """test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    """
    if not pycompat.iswindows:
        raise error.Abort(
            _(b'certificate chain building is only possible on Windows')
        )

    if not source:
        if not repo:
            raise error.Abort(
                _(
                    b"there is no Mercurial repository here, and no "
                    b"server specified"
                )
            )
        source = b"default"

    path = urlutil.get_unique_pull_path_obj(b'debugssl', ui, source)
    url = path.url

    defaultport = {b'https': 443, b'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_(b"malformed port number in URL"))
    else:
        raise error.Abort(_(b"only https and ssh connections are supported"))

    from . import win32

    # Certificate verification is deliberately disabled: we only need the
    # peer's certificate bytes, which getpeercert(True) returns below.
    # NOTE(review): ssl.wrap_socket() is deprecated since Python 3.7 and
    # removed in 3.12; this will need SSLContext.wrap_socket() eventually.
    s = ssl.wrap_socket(
        socket.socket(),
        ssl_version=ssl.PROTOCOL_TLS,
        cert_reqs=ssl.CERT_NONE,
        ca_certs=None,
    )

    try:
        s.connect(addr)
        cert = s.getpeercert(True)

        ui.status(_(b'checking the certificate chain for %s\n') % url.host)

        # First check without building; if incomplete, check again with
        # build=True (the default) to trigger the Windows Update path.
        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_(b'certificate chain is incomplete, updating... '))

            if not win32.checkcertificatechain(cert):
                ui.status(_(b'failed.\n'))
            else:
                ui.status(_(b'done.\n'))
        else:
            ui.status(_(b'full certificate chain is available\n'))
    finally:
        s.close()
3644 3644
3645 3645
@command(
    b"debugbackupbundle",
    [
        (
            b"",
            b"recover",
            b"",
            b"brings the specified changeset back into the repository",
        )
    ]
    + cmdutil.logopts,
    _(b"hg debugbackupbundle [--recover HASH]"),
)
def debugbackupbundle(ui, repo, *pats, **opts):
    """lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in each
    backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for that
    backup.
    """
    # Collect strip-backup bundles, most recently modified first.
    backups = list(
        filter(
            os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
        )
    )
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    opts = pycompat.byteskwargs(opts)
    opts[b"bundle"] = b""
    opts[b"force"] = None
    limit = logcmdutil.getlimit(opts)

    def display(other, chlist, displayer):
        # Show up to 'limit' changesets from 'chlist', honoring the
        # --newest-first and --no-merges log options.
        if opts.get(b"newest_first"):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                True for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b"no_merges") and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get(b"recover")
    if recovernode:
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_(b"%s already exists in the repo\n") % recovernode)
            return
    elif backups:
        msg = _(
            b"Recover changesets using: hg debugbackupbundle --recover "
            b"<changeset hash>\n\nAvailable backup changesets:"
        )
        ui.status(msg, label=b"status.removed")
    else:
        ui.status(_(b"no backup changesets found\n"))
        return

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = os.path.relpath(backup, encoding.getcwd())
        path = urlutil.get_unique_pull_path_obj(
            b'debugbackupbundle',
            ui,
            source,
        )
        try:
            other = hg.peer(repo, opts, path)
        except error.LookupError as ex:
            msg = _(b"\nwarning: unable to open bundle %s") % path.loc
            hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg, hint=hint)
            continue
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = hg.addbranchrevs(
            repo, other, branches, opts.get(b"rev")
        )

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        # Silence the incoming-changes lookup; failures just skip the bundle.
        with ui.silent():
            try:
                other, chlist, cleanupfn = bundlerepo.getremotechanges(
                    ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
                )
            except error.LookupError:
                continue

        try:
            if not chlist:
                continue
            if recovernode:
                # Recovery mode: apply the first bundle containing the node.
                with repo.lock(), repo.transaction(b"unbundle") as tr:
                    if scmutil.isrevsymbol(other, recovernode):
                        ui.status(_(b"Unbundling %s\n") % (recovernode))
                        f = hg.openpath(ui, path.loc)
                        gen = exchange.readbundle(ui, f, path.loc)
                        if isinstance(gen, bundle2.unbundle20):
                            bundle2.applybundle(
                                repo,
                                gen,
                                tr,
                                source=b"unbundle",
                                url=b"bundle:" + path.loc,
                            )
                        else:
                            gen.apply(repo, b"unbundle", b"bundle:" + path.loc)
                        break
            else:
                # Listing mode: print the bundle's timestamp (and path with
                # --verbose) followed by its changesets.
                backupdate = encoding.strtolocal(
                    time.strftime(
                        "%a %H:%M, %Y-%m-%d",
                        time.localtime(os.path.getmtime(path.loc)),
                    )
                )
                ui.status(b"\n%s\n" % (backupdate.ljust(50)))
                if ui.verbose:
                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), path.loc))
                else:
                    opts[
                        b"template"
                    ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
                displayer = logcmdutil.changesetdisplayer(
                    ui, other, opts, False
                )
                display(other, chlist, displayer)
                displayer.close()
        finally:
            cleanupfn()
3785 3785
3786 3786
@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    # Resolve the requested revision (working directory parent when no rev
    # is given) and dump every subrepository entry recorded in its substate.
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.writenoi18n(b'path %s\n' % path)
        ui.writenoi18n(b' source %s\n' % state[0])
        ui.writenoi18n(b' revision %s\n' % state[1])
3798 3798
3799 3799
@command(
    b'debugshell',
    [
        (
            b'c',
            b'command',
            b'',
            _(b'program passed in as a string'),
            _(b'COMMAND'),
        )
    ],
    _(b'[-c COMMAND]'),
    optionalrepo=True,
)
def debugshell(ui, repo, **opts):
    """run an interactive Python interpreter

    The local namespace is provided with a reference to the ui and
    the repo instance (if available).
    """
    import code

    # Names exposed to the interactive session / evaluated command.
    local_namespace = {
        'ui': ui,
        'repo': repo,
    }

    # py2exe disables initialization of the site module, which is responsible
    # for arranging for ``quit()`` to exit the interpreter. Manually initialize
    # the stuff that site normally does here, so that the interpreter can be
    # quit in a consistent manner, whether run with pyoxidizer, exewrapper.c,
    # py.exe, or py2exe.
    if getattr(sys, "frozen", None) == 'console_exe':
        try:
            import site

            site.setcopyright()
            site.sethelper()
            site.setquit()
        except ImportError:
            site = None  # Keep PyCharm happy

    command = opts.get('command')
    if not command:
        # No -c given: drop into a regular interactive session.
        code.interact(local=local_namespace)
        return

    # -c COMMAND: compile and run the supplied snippet non-interactively.
    compiled = code.compile_command(encoding.strfromlocal(command))
    code.InteractiveInterpreter(locals=local_namespace).runcode(compiled)
3849 3849
3850 3850
@command(
    b'debug-revlog-stats',
    [
        (b'c', b'changelog', None, _(b'Display changelog statistics')),
        (b'm', b'manifest', None, _(b'Display manifest statistics')),
        (b'f', b'filelogs', None, _(b'Display filelogs statistics')),
    ]
    + cmdutil.formatteropts,
)
def debug_revlog_stats(ui, repo, **opts):
    """display statistics about revlogs in the store"""
    opts = pycompat.byteskwargs(opts)
    selected = [opts[b"changelog"], opts[b"manifest"], opts[b"filelogs"]]
    # When the user selected nothing explicitly, report on everything.
    if all(value is None for value in selected):
        selected = [True, True, True]
    changelog, manifest, filelogs = selected

    repo = repo.unfiltered()
    fm = ui.formatter(b'debug-revlog-stats', opts)
    revlog_debug.debug_revlog_stats(repo, fm, changelog, manifest, filelogs)
    fm.end()
3876 3876
3877 3877
@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # Cache shared across successorssets() invocations to avoid recomputing
    # intermediate results from one revision to the next.
    cache = {}
    for rev in logcmdutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % bytes(ctx))
        all_sets = obsutil.successorssets(
            repo, ctx.node(), closest=opts['closest'], cache=cache
        )
        for successors in all_sets:
            if successors:
                # One indented line per successors set, nodes space-separated.
                ui.write(b' ')
                ui.write(b' '.join(short(node) for node in successors))
            ui.write(b'\n')
3932 3932
3933 3933
@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    flog = repo.file(b'.hgtags')
    for rev in repo:
        node = repo[rev].node()
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is None:
            # Not present in the cache at all.
            display = b'missing'
        elif fnode:
            display = hex(fnode)
            if not flog.hasnode(fnode):
                display += b' (unknown node)'
        else:
            # Present but falsy: a bogus cache entry.
            display = b'invalid'

        ui.write(b'%d %s %s\n' % (rev, hex(node), display))
3952 3952
3953 3953
@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        # --rev needs a repository even though the command itself is
        # declared optionalrepo.
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = logcmdutil.revrange(repo, opts['rev'])

    # Collect -D KEY=VALUE pairs as extra template properties; reject empty
    # keys and the key b'ui' (it would clash with the template resource).
    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split(b'=', 1))
            if not k or k == b'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_(b'malformed keyword definition: %s') % d)

    if ui.verbose:
        # Print the raw parse tree, then the alias-expanded tree if any
        # configured template alias actually changed it.
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
            )

    if revs is None:
        # Generic template: render exactly once with the -D properties.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested changeset.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
4017 4017
4018 4018
@command(
    b'debuguigetpass',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    response = ui.getpass(prompt)
    # Substitute a placeholder when getpass() yields no answer at all.
    if response is None:
        response = b"<default response>"
    ui.writenoi18n(b'response: %s\n' % response)
4033 4033
4034 4034
@command(
    b'debuguiprompt',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    # Echo back whatever the prompt machinery produced.
    ui.writenoi18n(b'response: %s\n' % ui.prompt(prompt))
4047 4047
4048 4048
@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Take both the working-copy lock and the store lock before rebuilding.
    with repo.wlock():
        with repo.lock():
            repo.updatecaches(caches=repository.CACHES_ALL)
4054 4054
4055 4055
@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
        (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlogs will be upgraded. You can restrict this using flags
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlog but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    * `--filelogs`: optimize the filelogs only
    * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
    """
    # Thin wrapper: deduplicate the requested optimizations and delegate the
    # real work to the upgrade module.
    optimizations = set(optimize)
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=optimizations, backup=backup, **opts
    )
4105 4105
4106 4106
@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    matched = list(repo[None].walk(m))
    if not matched:
        return

    # Honor ui.slash on platforms whose native separator is not '/'.
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        normalize = util.normpath
    else:

        def normalize(fn):
            return fn

    # Column widths sized to the longest repo-relative and cwd-relative paths.
    fmt = b'f %%-%ds %%-%ds %%s' % (
        max(len(abs) for abs in matched),
        max(len(repo.pathto(abs)) for abs in matched),
    )
    for abs in matched:
        exact = b'exact' if m.exact(abs) else b''
        line = fmt % (abs, normalize(repo.pathto(abs)), exact)
        ui.write(b"%s\n" % line.rstrip())
4133 4133
4134 4134
@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    ctx = scmutil.revsingle(repo, rev)
    for entry in obsutil.whyunstable(repo, ctx):
        divergent = entry.get(b'divergentnodes')
        if divergent:
            # Render each divergent node as "<hex> (<phase>)", trailing space
            # so it joins cleanly with the reason text.
            rendered = b' '.join(
                b'%s (%s)' % (c.hex(), c.phasestr()) for c in divergent
            )
            dnodes = rendered + b' '
        else:
            dnodes = b''
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )
4152 4152
4153 4153
@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    try:
        # Strip the generic remote options; only the test arguments remain.
        for remoteopt in cmdutil.remoteopts:
            del opts[remoteopt[1]]
        # Forward only the options that were actually set.
        args = pycompat.strkwargs({k: v for k, v in opts.items() if v})
        # run twice to check that we don't mess up the stream for the next command
        res1 = peer.debugwireargs(*vals, **args)
        res2 = peer.debugwireargs(*vals, **args)
        ui.write(b"%s\n" % res1)
        if res1 != res2:
            ui.warn(b"%s\n" % res2)
    finally:
        peer.close()
4184 4184
4185 4185
4186 4186 def _parsewirelangblocks(fh):
4187 4187 activeaction = None
4188 4188 blocklines = []
4189 4189 lastindent = 0
4190 4190
4191 4191 for line in fh:
4192 4192 line = line.rstrip()
4193 4193 if not line:
4194 4194 continue
4195 4195
4196 4196 if line.startswith(b'#'):
4197 4197 continue
4198 4198
4199 4199 if not line.startswith(b' '):
4200 4200 # New block. Flush previous one.
4201 4201 if activeaction:
4202 4202 yield activeaction, blocklines
4203 4203
4204 4204 activeaction = line
4205 4205 blocklines = []
4206 4206 lastindent = 0
4207 4207 continue
4208 4208
4209 4209 # Else we start with an indent.
4210 4210
4211 4211 if not activeaction:
4212 4212 raise error.Abort(_(b'indented line outside of block'))
4213 4213
4214 4214 indent = len(line) - len(line.lstrip())
4215 4215
4216 4216 # If this line is indented more than the last line, concatenate it.
4217 4217 if indent > lastindent and blocklines:
4218 4218 blocklines[-1] += line.lstrip()
4219 4219 else:
4220 4220 blocklines.append(line)
4221 4221 lastindent = indent
4222 4222
4223 4223 # Flush last block.
4224 4224 if activeaction:
4225 4225 yield activeaction, blocklines
4226 4226
4227 4227
4228 4228 @command(
4229 4229 b'debugwireproto',
4230 4230 [
4231 4231 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
4232 4232 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
4233 4233 (
4234 4234 b'',
4235 4235 b'noreadstderr',
4236 4236 False,
4237 4237 _(b'do not read from stderr of the remote'),
4238 4238 ),
4239 4239 (
4240 4240 b'',
4241 4241 b'nologhandshake',
4242 4242 False,
4243 4243 _(b'do not log I/O related to the peer handshake'),
4244 4244 ),
4245 4245 ]
4246 4246 + cmdutil.remoteopts,
4247 4247 _(b'[PATH]'),
4248 4248 optionalrepo=True,
4249 4249 )
4250 4250 def debugwireproto(ui, repo, path=None, **opts):
4251 4251 """send wire protocol commands to a server
4252 4252
4253 4253 This command can be used to issue wire protocol commands to remote
4254 4254 peers and to debug the raw data being exchanged.
4255 4255
4256 4256 ``--localssh`` will start an SSH server against the current repository
4257 4257 and connect to that. By default, the connection will perform a handshake
4258 4258 and establish an appropriate peer instance.
4259 4259
4260 4260 ``--peer`` can be used to bypass the handshake protocol and construct a
4261 4261 peer instance using the specified class type. Valid values are ``raw``,
4262 4262 ``ssh1``. ``raw`` instances only allow sending raw data payloads and
4263 4263 don't support higher-level command actions.
4264 4264
4265 4265 ``--noreadstderr`` can be used to disable automatic reading from stderr
4266 4266 of the peer (for SSH connections only). Disabling automatic reading of
4267 4267 stderr is useful for making output more deterministic.
4268 4268
4269 4269 Commands are issued via a mini language which is specified via stdin.
4270 4270 The language consists of individual actions to perform. An action is
4271 4271 defined by a block. A block is defined as a line with no leading
4272 4272 space followed by 0 or more lines with leading space. Blocks are
4273 4273 effectively a high-level command with additional metadata.
4274 4274
4275 4275 Lines beginning with ``#`` are ignored.
4276 4276
4277 4277 The following sections denote available actions.
4278 4278
4279 4279 raw
4280 4280 ---
4281 4281
4282 4282 Send raw data to the server.
4283 4283
4284 4284 The block payload contains the raw data to send as one atomic send
4285 4285 operation. The data may not actually be delivered in a single system
4286 4286 call: it depends on the abilities of the transport being used.
4287 4287
4288 4288 Each line in the block is de-indented and concatenated. Then, that
4289 4289 value is evaluated as a Python b'' literal. This allows the use of
4290 4290 backslash escaping, etc.
4291 4291
4292 4292 raw+
4293 4293 ----
4294 4294
4295 4295 Behaves like ``raw`` except flushes output afterwards.
4296 4296
4297 4297 command <X>
4298 4298 -----------
4299 4299
4300 4300 Send a request to run a named command, whose name follows the ``command``
4301 4301 string.
4302 4302
4303 4303 Arguments to the command are defined as lines in this block. The format of
4304 4304 each line is ``<key> <value>``. e.g.::
4305 4305
4306 4306 command listkeys
4307 4307 namespace bookmarks
4308 4308
4309 4309 If the value begins with ``eval:``, it will be interpreted as a Python
4310 4310 literal expression. Otherwise values are interpreted as Python b'' literals.
4311 4311 This allows sending complex types and encoding special byte sequences via
4312 4312 backslash escaping.
4313 4313
4314 4314 The following arguments have special meaning:
4315 4315
4316 4316 ``PUSHFILE``
4317 4317 When defined, the *push* mechanism of the peer will be used instead
4318 4318 of the static request-response mechanism and the content of the
4319 4319 file specified in the value of this argument will be sent as the
4320 4320 command payload.
4321 4321
4322 4322 This can be used to submit a local bundle file to the remote.
4323 4323
4324 4324 batchbegin
4325 4325 ----------
4326 4326
4327 4327 Instruct the peer to begin a batched send.
4328 4328
4329 4329 All ``command`` blocks are queued for execution until the next
4330 4330 ``batchsubmit`` block.
4331 4331
4332 4332 batchsubmit
4333 4333 -----------
4334 4334
4335 4335 Submit previously queued ``command`` blocks as a batch request.
4336 4336
4337 4337 This action MUST be paired with a ``batchbegin`` action.
4338 4338
4339 4339 httprequest <method> <path>
4340 4340 ---------------------------
4341 4341
4342 4342 (HTTP peer only)
4343 4343
4344 4344 Send an HTTP request to the peer.
4345 4345
4346 4346 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
4347 4347
4348 4348 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
4349 4349 headers to add to the request. e.g. ``Accept: foo``.
4350 4350
4351 4351 The following arguments are special:
4352 4352
4353 4353 ``BODYFILE``
4354 4354 The content of the file defined as the value to this argument will be
4355 4355 transferred verbatim as the HTTP request body.
4356 4356
4357 4357 ``frame <type> <flags> <payload>``
4358 4358 Send a unified protocol frame as part of the request body.
4359 4359
4360 4360 All frames will be collected and sent as the body to the HTTP
4361 4361 request.
4362 4362
4363 4363 close
4364 4364 -----
4365 4365
4366 4366 Close the connection to the server.
4367 4367
4368 4368 flush
4369 4369 -----
4370 4370
4371 4371 Flush data written to the server.
4372 4372
4373 4373 readavailable
4374 4374 -------------
4375 4375
4376 4376 Close the write end of the connection and read all available data from
4377 4377 the server.
4378 4378
4379 4379 If the connection to the server encompasses multiple pipes, we poll both
4380 4380 pipes and read available data.
4381 4381
4382 4382 readline
4383 4383 --------
4384 4384
4385 4385 Read a line of output from the server. If there are multiple output
4386 4386 pipes, reads only the main pipe.
4387 4387
4388 4388 ereadline
4389 4389 ---------
4390 4390
4391 4391 Like ``readline``, but read from the stderr pipe, if available.
4392 4392
4393 4393 read <X>
4394 4394 --------
4395 4395
4396 4396 ``read()`` N bytes from the server's main output pipe.
4397 4397
4398 4398 eread <X>
4399 4399 ---------
4400 4400
4401 4401 ``read()`` N bytes from the server's stderr pipe, if available.
4402 4402
4403 4403 Specifying Unified Frame-Based Protocol Frames
4404 4404 ----------------------------------------------
4405 4405
4406 4406 It is possible to emit a *Unified Frame-Based Protocol* by using special
4407 4407 syntax.
4408 4408
4409 4409 A frame is composed as a type, flags, and payload. These can be parsed
4410 4410 from a string of the form:
4411 4411
4412 4412 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
4413 4413
4414 4414 ``request-id`` and ``stream-id`` are integers defining the request and
4415 4415 stream identifiers.
4416 4416
4417 4417 ``type`` can be an integer value for the frame type or the string name
4418 4418 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
4419 4419 ``command-name``.
4420 4420
4421 4421 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
4422 4422 components. Each component (and there can be just one) can be an integer
4423 4423 or a flag name for stream flags or frame flags, respectively. Values are
4424 4424 resolved to integers and then bitwise OR'd together.
4425 4425
4426 4426 ``payload`` represents the raw frame payload. If it begins with
4427 4427 ``cbor:``, the following string is evaluated as Python code and the
4428 4428 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
4429 4429 as a Python byte string literal.
4430 4430 """
4431 4431 opts = pycompat.byteskwargs(opts)
4432 4432
4433 4433 if opts[b'localssh'] and not repo:
4434 4434 raise error.Abort(_(b'--localssh requires a repository'))
4435 4435
4436 4436 if opts[b'peer'] and opts[b'peer'] not in (
4437 4437 b'raw',
4438 4438 b'ssh1',
4439 4439 ):
4440 4440 raise error.Abort(
4441 4441 _(b'invalid value for --peer'),
4442 4442 hint=_(b'valid values are "raw" and "ssh1"'),
4443 4443 )
4444 4444
4445 4445 if path and opts[b'localssh']:
4446 4446 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4447 4447
4448 4448 if ui.interactive():
4449 4449 ui.write(_(b'(waiting for commands on stdin)\n'))
4450 4450
4451 4451 blocks = list(_parsewirelangblocks(ui.fin))
4452 4452
4453 4453 proc = None
4454 4454 stdin = None
4455 4455 stdout = None
4456 4456 stderr = None
4457 4457 opener = None
4458 4458
4459 4459 if opts[b'localssh']:
4460 4460 # We start the SSH server in its own process so there is process
4461 4461 # separation. This prevents a whole class of potential bugs around
4462 4462 # shared state from interfering with server operation.
4463 4463 args = procutil.hgcmd() + [
4464 4464 b'-R',
4465 4465 repo.root,
4466 4466 b'debugserve',
4467 4467 b'--sshstdio',
4468 4468 ]
4469 4469 proc = subprocess.Popen(
4470 4470 pycompat.rapply(procutil.tonativestr, args),
4471 4471 stdin=subprocess.PIPE,
4472 4472 stdout=subprocess.PIPE,
4473 4473 stderr=subprocess.PIPE,
4474 4474 bufsize=0,
4475 4475 )
4476 4476
4477 4477 stdin = proc.stdin
4478 4478 stdout = proc.stdout
4479 4479 stderr = proc.stderr
4480 4480
4481 4481 # We turn the pipes into observers so we can log I/O.
4482 4482 if ui.verbose or opts[b'peer'] == b'raw':
4483 4483 stdin = util.makeloggingfileobject(
4484 4484 ui, proc.stdin, b'i', logdata=True
4485 4485 )
4486 4486 stdout = util.makeloggingfileobject(
4487 4487 ui, proc.stdout, b'o', logdata=True
4488 4488 )
4489 4489 stderr = util.makeloggingfileobject(
4490 4490 ui, proc.stderr, b'e', logdata=True
4491 4491 )
4492 4492
4493 4493 # --localssh also implies the peer connection settings.
4494 4494
4495 4495 url = b'ssh://localserver'
4496 4496 autoreadstderr = not opts[b'noreadstderr']
4497 4497
4498 4498 if opts[b'peer'] == b'ssh1':
4499 4499 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4500 4500 peer = sshpeer.sshv1peer(
4501 4501 ui,
4502 4502 url,
4503 4503 proc,
4504 4504 stdin,
4505 4505 stdout,
4506 4506 stderr,
4507 4507 None,
4508 4508 autoreadstderr=autoreadstderr,
4509 4509 )
4510 4510 elif opts[b'peer'] == b'raw':
4511 4511 ui.write(_(b'using raw connection to peer\n'))
4512 4512 peer = None
4513 4513 else:
4514 4514 ui.write(_(b'creating ssh peer from handshake results\n'))
4515 peer = sshpeer.makepeer(
4515 peer = sshpeer._make_peer(
4516 4516 ui,
4517 4517 url,
4518 4518 proc,
4519 4519 stdin,
4520 4520 stdout,
4521 4521 stderr,
4522 4522 autoreadstderr=autoreadstderr,
4523 4523 )
4524 4524
4525 4525 elif path:
4526 4526 # We bypass hg.peer() so we can proxy the sockets.
4527 4527 # TODO consider not doing this because we skip
4528 4528 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4529 4529 u = urlutil.url(path)
4530 4530 if u.scheme != b'http':
4531 4531 raise error.Abort(_(b'only http:// paths are currently supported'))
4532 4532
4533 4533 url, authinfo = u.authinfo()
4534 4534 openerargs = {
4535 4535 'useragent': b'Mercurial debugwireproto',
4536 4536 }
4537 4537
4538 4538 # Turn pipes/sockets into observers so we can log I/O.
4539 4539 if ui.verbose:
4540 4540 openerargs.update(
4541 4541 {
4542 4542 'loggingfh': ui,
4543 4543 'loggingname': b's',
4544 4544 'loggingopts': {
4545 4545 'logdata': True,
4546 4546 'logdataapis': False,
4547 4547 },
4548 4548 }
4549 4549 )
4550 4550
4551 4551 if ui.debugflag:
4552 4552 openerargs['loggingopts']['logdataapis'] = True
4553 4553
4554 4554 # Don't send default headers when in raw mode. This allows us to
4555 4555 # bypass most of the behavior of our URL handling code so we can
4556 4556 # have near complete control over what's sent on the wire.
4557 4557 if opts[b'peer'] == b'raw':
4558 4558 openerargs['sendaccept'] = False
4559 4559
4560 4560 opener = urlmod.opener(ui, authinfo, **openerargs)
4561 4561
4562 4562 if opts[b'peer'] == b'raw':
4563 4563 ui.write(_(b'using raw connection to peer\n'))
4564 4564 peer = None
4565 4565 elif opts[b'peer']:
4566 4566 raise error.Abort(
4567 4567 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4568 4568 )
4569 4569 else:
4570 4570 peer_path = urlutil.try_path(ui, path)
4571 peer = httppeer.makepeer(ui, peer_path, opener=opener)
4571 peer = httppeer._make_peer(ui, peer_path, opener=opener)
4572 4572
4573 4573 # We /could/ populate stdin/stdout with sock.makefile()...
4574 4574 else:
4575 4575 raise error.Abort(_(b'unsupported connection configuration'))
4576 4576
4577 4577 batchedcommands = None
4578 4578
4579 4579 # Now perform actions based on the parsed wire language instructions.
4580 4580 for action, lines in blocks:
4581 4581 if action in (b'raw', b'raw+'):
4582 4582 if not stdin:
4583 4583 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4584 4584
4585 4585 # Concatenate the data together.
4586 4586 data = b''.join(l.lstrip() for l in lines)
4587 4587 data = stringutil.unescapestr(data)
4588 4588 stdin.write(data)
4589 4589
4590 4590 if action == b'raw+':
4591 4591 stdin.flush()
4592 4592 elif action == b'flush':
4593 4593 if not stdin:
4594 4594 raise error.Abort(_(b'cannot call flush on this peer'))
4595 4595 stdin.flush()
4596 4596 elif action.startswith(b'command'):
4597 4597 if not peer:
4598 4598 raise error.Abort(
4599 4599 _(
4600 4600 b'cannot send commands unless peer instance '
4601 4601 b'is available'
4602 4602 )
4603 4603 )
4604 4604
4605 4605 command = action.split(b' ', 1)[1]
4606 4606
4607 4607 args = {}
4608 4608 for line in lines:
4609 4609 # We need to allow empty values.
4610 4610 fields = line.lstrip().split(b' ', 1)
4611 4611 if len(fields) == 1:
4612 4612 key = fields[0]
4613 4613 value = b''
4614 4614 else:
4615 4615 key, value = fields
4616 4616
4617 4617 if value.startswith(b'eval:'):
4618 4618 value = stringutil.evalpythonliteral(value[5:])
4619 4619 else:
4620 4620 value = stringutil.unescapestr(value)
4621 4621
4622 4622 args[key] = value
4623 4623
4624 4624 if batchedcommands is not None:
4625 4625 batchedcommands.append((command, args))
4626 4626 continue
4627 4627
4628 4628 ui.status(_(b'sending %s command\n') % command)
4629 4629
4630 4630 if b'PUSHFILE' in args:
4631 4631 with open(args[b'PUSHFILE'], 'rb') as fh:
4632 4632 del args[b'PUSHFILE']
4633 4633 res, output = peer._callpush(
4634 4634 command, fh, **pycompat.strkwargs(args)
4635 4635 )
4636 4636 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4637 4637 ui.status(
4638 4638 _(b'remote output: %s\n') % stringutil.escapestr(output)
4639 4639 )
4640 4640 else:
4641 4641 with peer.commandexecutor() as e:
4642 4642 res = e.callcommand(command, args).result()
4643 4643
4644 4644 ui.status(
4645 4645 _(b'response: %s\n')
4646 4646 % stringutil.pprint(res, bprefix=True, indent=2)
4647 4647 )
4648 4648
4649 4649 elif action == b'batchbegin':
4650 4650 if batchedcommands is not None:
4651 4651 raise error.Abort(_(b'nested batchbegin not allowed'))
4652 4652
4653 4653 batchedcommands = []
4654 4654 elif action == b'batchsubmit':
4655 4655 # There is a batching API we could go through. But it would be
4656 4656 # difficult to normalize requests into function calls. It is easier
4657 4657 # to bypass this layer and normalize to commands + args.
4658 4658 ui.status(
4659 4659 _(b'sending batch with %d sub-commands\n')
4660 4660 % len(batchedcommands)
4661 4661 )
4662 4662 assert peer is not None
4663 4663 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4664 4664 ui.status(
4665 4665 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4666 4666 )
4667 4667
4668 4668 batchedcommands = None
4669 4669
4670 4670 elif action.startswith(b'httprequest '):
4671 4671 if not opener:
4672 4672 raise error.Abort(
4673 4673 _(b'cannot use httprequest without an HTTP peer')
4674 4674 )
4675 4675
4676 4676 request = action.split(b' ', 2)
4677 4677 if len(request) != 3:
4678 4678 raise error.Abort(
4679 4679 _(
4680 4680 b'invalid httprequest: expected format is '
4681 4681 b'"httprequest <method> <path>'
4682 4682 )
4683 4683 )
4684 4684
4685 4685 method, httppath = request[1:]
4686 4686 headers = {}
4687 4687 body = None
4688 4688 frames = []
4689 4689 for line in lines:
4690 4690 line = line.lstrip()
4691 4691 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4692 4692 if m:
4693 4693 # Headers need to use native strings.
4694 4694 key = pycompat.strurl(m.group(1))
4695 4695 value = pycompat.strurl(m.group(2))
4696 4696 headers[key] = value
4697 4697 continue
4698 4698
4699 4699 if line.startswith(b'BODYFILE '):
4700 4700 with open(line.split(b' ', 1), b'rb') as fh:
4701 4701 body = fh.read()
4702 4702 elif line.startswith(b'frame '):
4703 4703 frame = wireprotoframing.makeframefromhumanstring(
4704 4704 line[len(b'frame ') :]
4705 4705 )
4706 4706
4707 4707 frames.append(frame)
4708 4708 else:
4709 4709 raise error.Abort(
4710 4710 _(b'unknown argument to httprequest: %s') % line
4711 4711 )
4712 4712
4713 4713 url = path + httppath
4714 4714
4715 4715 if frames:
4716 4716 body = b''.join(bytes(f) for f in frames)
4717 4717
4718 4718 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4719 4719
4720 4720 # urllib.Request insists on using has_data() as a proxy for
4721 4721 # determining the request method. Override that to use our
4722 4722 # explicitly requested method.
4723 4723 req.get_method = lambda: pycompat.sysstr(method)
4724 4724
4725 4725 try:
4726 4726 res = opener.open(req)
4727 4727 body = res.read()
4728 4728 except util.urlerr.urlerror as e:
4729 4729 # read() method must be called, but only exists in Python 2
4730 4730 getattr(e, 'read', lambda: None)()
4731 4731 continue
4732 4732
4733 4733 ct = res.headers.get('Content-Type')
4734 4734 if ct == 'application/mercurial-cbor':
4735 4735 ui.write(
4736 4736 _(b'cbor> %s\n')
4737 4737 % stringutil.pprint(
4738 4738 cborutil.decodeall(body), bprefix=True, indent=2
4739 4739 )
4740 4740 )
4741 4741
4742 4742 elif action == b'close':
4743 4743 assert peer is not None
4744 4744 peer.close()
4745 4745 elif action == b'readavailable':
4746 4746 if not stdout or not stderr:
4747 4747 raise error.Abort(
4748 4748 _(b'readavailable not available on this peer')
4749 4749 )
4750 4750
4751 4751 stdin.close()
4752 4752 stdout.read()
4753 4753 stderr.read()
4754 4754
4755 4755 elif action == b'readline':
4756 4756 if not stdout:
4757 4757 raise error.Abort(_(b'readline not available on this peer'))
4758 4758 stdout.readline()
4759 4759 elif action == b'ereadline':
4760 4760 if not stderr:
4761 4761 raise error.Abort(_(b'ereadline not available on this peer'))
4762 4762 stderr.readline()
4763 4763 elif action.startswith(b'read '):
4764 4764 count = int(action.split(b' ', 1)[1])
4765 4765 if not stdout:
4766 4766 raise error.Abort(_(b'read not available on this peer'))
4767 4767 stdout.read(count)
4768 4768 elif action.startswith(b'eread '):
4769 4769 count = int(action.split(b' ', 1)[1])
4770 4770 if not stderr:
4771 4771 raise error.Abort(_(b'eread not available on this peer'))
4772 4772 stderr.read(count)
4773 4773 else:
4774 4774 raise error.Abort(_(b'unknown action: %s') % action)
4775 4775
4776 4776 if batchedcommands is not None:
4777 4777 raise error.Abort(_(b'unclosed "batchbegin" request'))
4778 4778
4779 4779 if peer:
4780 4780 peer.close()
4781 4781
4782 4782 if proc:
4783 4783 proc.kill()
@@ -1,640 +1,640 b''
1 1 # httppeer.py - HTTP repository proxy classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import errno
11 11 import io
12 12 import os
13 13 import socket
14 14 import struct
15 15
16 16 from concurrent import futures
17 17 from .i18n import _
18 18 from .pycompat import getattr
19 19 from . import (
20 20 bundle2,
21 21 error,
22 22 httpconnection,
23 23 pycompat,
24 24 statichttprepo,
25 25 url as urlmod,
26 26 util,
27 27 wireprotov1peer,
28 28 )
29 29 from .utils import urlutil
30 30
31 31 httplib = util.httplib
32 32 urlerr = util.urlerr
33 33 urlreq = util.urlreq
34 34
35 35
def encodevalueinheaders(value, header, limit):
    """Encode a string value into multiple HTTP headers.

    ``value`` will be encoded into 1 or more HTTP headers with the names
    ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
    name + value will be at most ``limit`` bytes long.

    Returns a list of 2-tuples consisting of header names and values as
    native strings.
    """
    # HTTP headers are ASCII. Python 3 requires header names to be
    # unicode, not bytes; this function always takes bytes in.
    fmt = pycompat.strurl(header) + r'-%s'
    # Note: it is *NOT* a bug that the suffix here is a bytestring: we
    # are only measuring the encoded length of the ": \r\n" separator,
    # and an r-string would turn \r into a literal backslash-r.
    valuelen = limit - len(fmt % '000') - len(b': \r\n')

    # Chunk the value into valuelen-sized slices, numbering the headers
    # from 1 upward.
    return [
        (fmt % str(seq), pycompat.strurl(value[start : start + valuelen]))
        for seq, start in enumerate(range(0, len(value), valuelen), 1)
    ]
63 63
64 64
class _multifile:
    """Present several length-bearing file objects as one concatenated file.

    Each wrapped object must expose a ``length`` attribute so the total
    request body size can be computed up front (needed for the HTTP
    Content-Length header). Reads drain the wrapped objects in order.
    """

    def __init__(self, *fileobjs):
        # Reject anything without a length early; the combined length is
        # required by makev1commandrequest() for Content-Length.
        for f in fileobjs:
            if not util.safehasattr(f, b'length'):
                raise ValueError(
                    b'_multifile only supports file objects that '
                    b'have a length but this one does not:',
                    type(f),
                    f,
                )
        self._fileobjs = fileobjs
        self._index = 0

    @property
    def length(self):
        """Total remaining length across all wrapped file objects."""
        return sum(f.length for f in self._fileobjs)

    def read(self, amt=None):
        """Read up to ``amt`` bytes, or everything if ``amt`` is None/<= 0.

        Bug fix: the previous code evaluated ``amt <= 0`` directly, which
        raises TypeError when ``amt`` is None — the conventional
        "read everything" default for file-like objects.
        """
        if amt is None or amt <= 0:
            return b''.join(f.read() for f in self._fileobjs)
        parts = []
        while amt and self._index < len(self._fileobjs):
            parts.append(self._fileobjs[self._index].read(amt))
            got = len(parts[-1])
            if got < amt:
                # Current file exhausted; advance to the next one.
                self._index += 1
            amt -= got
        return b''.join(parts)

    def seek(self, offset, whence=os.SEEK_SET):
        """Rewind to the start; arbitrary seeking is not supported."""
        if whence != os.SEEK_SET:
            raise NotImplementedError(
                b'_multifile does not support anything other'
                b' than os.SEEK_SET for whence on seek()'
            )
        if offset != 0:
            raise NotImplementedError(
                b'_multifile only supports seeking to start, but that '
                b'could be fixed if you need it'
            )
        for f in self._fileobjs:
            f.seek(0)
        self._index = 0
108 108
109 109
def makev1commandrequest(
    ui, requestbuilder, caps, capablefn, repobaseurl, cmd, args
):
    """Make an HTTP request to run a command for a version 1 client.

    ``caps`` is a set of known server capabilities. The value may be
    None if capabilities are not yet known.

    ``capablefn`` is a function to evaluate a capability.

    ``cmd``, ``args``, and ``data`` define the command, its arguments, and
    raw data to pass to it.

    Returns a 3-tuple of (request object, full command URL, query string).
    Command arguments are transported via POST body, X-HgArg-* headers, or
    the query string, in that order of preference, depending on server
    capabilities.
    """
    if cmd == b'pushkey':
        # pushkey has no payload; send an empty data member so the request
        # is issued as a POST.
        args[b'data'] = b''
    data = args.pop(b'data', None)
    headers = args.pop(b'headers', {})

    ui.debug(b"sending %s command\n" % cmd)
    q = [(b'cmd', cmd)]
    headersize = 0
    # Important: don't use self.capable() here or else you end up
    # with infinite recursion when trying to look up capabilities
    # for the first time.
    postargsok = caps is not None and b'httppostargs' in caps

    # Send arguments via POST.
    if postargsok and args:
        strargs = urlreq.urlencode(sorted(args.items()))
        if not data:
            data = strargs
        else:
            # Prepend the url-encoded arguments to the existing body,
            # wrapping everything in a _multifile so a total length is
            # still available.
            if isinstance(data, bytes):
                i = io.BytesIO(data)
                i.length = len(data)
                data = i
            argsio = io.BytesIO(strargs)
            argsio.length = len(strargs)
            data = _multifile(argsio, data)
        headers['X-HgArgs-Post'] = len(strargs)
    elif args:
        # Calling self.capable() can infinite loop if we are calling
        # "capabilities". But that command should never accept wire
        # protocol arguments. So this should never happen.
        assert cmd != b'capabilities'
        httpheader = capablefn(b'httpheader')
        if httpheader:
            # First field of the capability is the max header size.
            headersize = int(httpheader.split(b',', 1)[0])

        # Send arguments via HTTP headers.
        if headersize > 0:
            # The headers can typically carry more data than the URL.
            encoded_args = urlreq.urlencode(sorted(args.items()))
            for header, value in encodevalueinheaders(
                encoded_args, b'X-HgArg', headersize
            ):
                headers[header] = value
        # Send arguments via query string (Mercurial <1.9).
        else:
            q += sorted(args.items())

    qs = b'?%s' % urlreq.urlencode(q)
    cu = b"%s%s" % (repobaseurl, qs)
    size = 0
    if util.safehasattr(data, b'length'):
        size = data.length
    elif data is not None:
        size = len(data)
    if data is not None and 'Content-Type' not in headers:
        headers['Content-Type'] = 'application/mercurial-0.1'

    # Tell the server we accept application/mercurial-0.2 and multiple
    # compression formats if the server is capable of emitting those
    # payloads.
    # Note: Keep this set empty by default, as client advertisement of
    # protocol parameters should only occur after the handshake.
    protoparams = set()

    mediatypes = set()
    if caps is not None:
        mt = capablefn(b'httpmediatype')
        if mt:
            protoparams.add(b'0.1')
            mediatypes = set(mt.split(b','))

    protoparams.add(b'partial-pull')

    if b'0.2tx' in mediatypes:
        protoparams.add(b'0.2')

    if b'0.2tx' in mediatypes and capablefn(b'compression'):
        # We /could/ compare supported compression formats and prune
        # non-mutually supported or error if nothing is mutually supported.
        # For now, send the full list to the server and have it error.
        comps = [
            e.wireprotosupport().name
            for e in util.compengines.supportedwireengines(util.CLIENTROLE)
        ]
        protoparams.add(b'comp=%s' % b','.join(comps))

    if protoparams:
        # Protocol parameters travel in X-HgProto-* headers; fall back to
        # a 1024-byte limit when the server did not advertise one.
        protoheaders = encodevalueinheaders(
            b' '.join(sorted(protoparams)), b'X-HgProto', headersize or 1024
        )
        for header, value in protoheaders:
            headers[header] = value

    # Advertise which request headers vary per-request so caches behave.
    varyheaders = []
    for header in headers:
        if header.lower().startswith('x-hg'):
            varyheaders.append(header)

    if varyheaders:
        headers['Vary'] = ','.join(sorted(varyheaders))

    req = requestbuilder(pycompat.strurl(cu), data, headers)

    if data is not None:
        ui.debug(b"sending %d bytes\n" % size)
        req.add_unredirected_header('Content-Length', '%d' % size)

    return req, cu, qs
232 232
233 233
def sendrequest(ui, opener, req):
    """Send a prepared HTTP request.

    Returns the response object.

    Raises ``error.Abort`` on HTTP 401, re-raises other HTTP errors, and
    converts lower-level ``HTTPException`` failures into ``IOError``.
    When ``devel.debug.peer-request`` is enabled, request/response details
    and timing are emitted via ``ui.debug``.
    """
    dbg = ui.debug
    if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
        line = b'devel-peer-request: %s\n'
        dbg(
            line
            % b'%s %s'
            % (
                pycompat.bytesurl(req.get_method()),
                pycompat.bytesurl(req.get_full_url()),
            )
        )
        hgargssize = None

        for header, value in sorted(req.header_items()):
            header = pycompat.bytesurl(header)
            value = pycompat.bytesurl(value)
            if header.startswith(b'X-hgarg-'):
                # Aggregate argument headers into a single size figure
                # rather than dumping (potentially sensitive) values.
                if hgargssize is None:
                    hgargssize = 0
                hgargssize += len(value)
            else:
                dbg(line % b' %s %s' % (header, value))

        if hgargssize is not None:
            dbg(
                line
                % b' %d bytes of commands arguments in headers'
                % hgargssize
            )
        data = req.data
        if data is not None:
            length = getattr(data, 'length', None)
            if length is None:
                length = len(data)
            dbg(line % b' %d bytes of data' % length)

    start = util.timer()

    res = None
    try:
        res = opener.open(req)
    except urlerr.httperror as inst:
        if inst.code == 401:
            raise error.Abort(_(b'authorization failed'))
        raise
    except httplib.HTTPException as inst:
        ui.debug(
            b'http error requesting %s\n'
            % urlutil.hidepassword(req.get_full_url())
        )
        ui.traceback()
        raise IOError(None, inst)
    finally:
        if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
            # res stays None when opener.open() raised; report code -1.
            code = res.code if res else -1
            dbg(
                line
                % b' finished in %.4f seconds (%d)'
                % (util.timer() - start, code)
            )

    # Insert error handlers for common I/O failures.
    urlmod.wrapresponse(res)

    return res
304 304
305 305
class RedirectedRepoError(error.RepoError):
    """RepoError raised when the server redirected the request.

    ``respurl`` records the URL the server redirected to, so callers can
    retry the request against the new location.
    """

    def __init__(self, msg, respurl):
        super().__init__(msg)
        self.respurl = respurl
310 310
311 311
def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible):
    """Parse the HTTP response to a version 1 wire protocol command.

    Validates the Content-Type, detects redirects, and wraps the response
    in a decompressing reader where the media type calls for it.

    Returns a 3-tuple of (effective URL, content-type, response object).

    Raises ``RedirectedRepoError`` when a redirect dropped the query
    string, ``error.OutOfBandError`` for hg-error payloads, and
    ``error.RepoError`` for non-Mercurial or unsupported responses.
    """
    # record the url we got redirected to
    redirected = False
    respurl = pycompat.bytesurl(resp.geturl())
    if respurl.endswith(qs):
        respurl = respurl[: -len(qs)]
        qsdropped = False
    else:
        qsdropped = True

    if baseurl.rstrip(b'/') != respurl.rstrip(b'/'):
        redirected = True
        if not ui.quiet:
            ui.warn(_(b'real URL is %s\n') % respurl)

    try:
        proto = pycompat.bytesurl(resp.getheader('content-type', ''))
    except AttributeError:
        # Some response objects expose headers via a mapping instead of
        # getheader().
        proto = pycompat.bytesurl(resp.headers.get('content-type', ''))

    safeurl = urlutil.hidepassword(baseurl)
    if proto.startswith(b'application/hg-error'):
        raise error.OutOfBandError(resp.read())

    # Pre 1.0 versions of Mercurial used text/plain and
    # application/hg-changegroup. We don't support such old servers.
    if not proto.startswith(b'application/mercurial-'):
        ui.debug(b"requested URL: '%s'\n" % urlutil.hidepassword(requrl))
        msg = _(
            b"'%s' does not appear to be an hg repository:\n"
            b"---%%<--- (%s)\n%s\n---%%<---\n"
        ) % (safeurl, proto or b'no content-type', resp.read(1024))

        # Some servers may strip the query string from the redirect. We
        # raise a special error type so callers can react to this specially.
        if redirected and qsdropped:
            raise RedirectedRepoError(msg, respurl)
        else:
            raise error.RepoError(msg)

    try:
        # e.g. b'application/mercurial-0.2' -> subtype b'0.2'.
        subtype = proto.split(b'-', 1)[1]

        version_info = tuple([int(n) for n in subtype.split(b'.')])
    except ValueError:
        raise error.RepoError(
            _(b"'%s' sent a broken Content-Type header (%s)") % (safeurl, proto)
        )

    # TODO consider switching to a decompression reader that uses
    # generators.
    if version_info == (0, 1):
        if compressible:
            resp = util.compengines[b'zlib'].decompressorreader(resp)

    elif version_info == (0, 2):
        # application/mercurial-0.2 always identifies the compression
        # engine in the payload header.
        elen = struct.unpack(b'B', util.readexactly(resp, 1))[0]
        ename = util.readexactly(resp, elen)
        engine = util.compengines.forwiretype(ename)

        resp = engine.decompressorreader(resp)
    else:
        raise error.RepoError(
            _(b"'%s' uses newer protocol %s") % (safeurl, subtype)
        )

    return respurl, proto, resp
381 381
382 382
class httppeer(wireprotov1peer.wirepeer):
    """Version 1 wire protocol peer speaking over HTTP(S)."""

    def __init__(self, ui, path, url, opener, requestbuilder, caps):
        super().__init__(ui, path=path)
        self._url = url
        self._caps = caps
        # Servers with httppostargs can receive arbitrarily large command
        # arguments in the POST body; otherwise argument size is limited.
        self.limitedarguments = caps is not None and b'httppostargs' not in caps
        self._urlopener = opener
        self._requestbuilder = requestbuilder

    def __del__(self):
        # Tear down any pooled connections held by the opener's handlers.
        for h in self._urlopener.handlers:
            h.close()
            getattr(h, "close_all", lambda: None)()

    # Begin of ipeerconnection interface.

    def url(self):
        return self.path.loc

    def local(self):
        # HTTP peers are never local repositories.
        return None

    def canpush(self):
        return True

    def close(self):
        """Report request/byte counters; nothing is actually torn down here."""
        try:
            reqs, sent, recv = (
                self._urlopener.requestscount,
                self._urlopener.sentbytescount,
                self._urlopener.receivedbytescount,
            )
        except AttributeError:
            # Opener without statistics counters; nothing to report.
            return
        self.ui.note(
            _(
                b'(sent %d HTTP requests and %d bytes; '
                b'received %d bytes in responses)\n'
            )
            % (reqs, sent, recv)
        )

    # End of ipeerconnection interface.

    # Begin of ipeercommands interface.

    def capabilities(self):
        return self._caps

    # End of ipeercommands interface.

    def _callstream(self, cmd, _compressible=False, **args):
        """Issue ``cmd`` and return the (possibly decompressing) response."""
        args = pycompat.byteskwargs(args)

        req, cu, qs = makev1commandrequest(
            self.ui,
            self._requestbuilder,
            self._caps,
            self.capable,
            self._url,
            cmd,
            args,
        )

        resp = sendrequest(self.ui, self._urlopener, req)

        # Remember any redirect so subsequent commands go straight to the
        # effective URL.
        self._url, ct, resp = parsev1commandresponse(
            self.ui, self._url, cu, qs, resp, _compressible
        )

        return resp

    def _call(self, cmd, **args):
        """Issue ``cmd`` and return the full response body."""
        fp = self._callstream(cmd, **args)
        try:
            return fp.read()
        finally:
            # if using keepalive, allow connection to be reused
            fp.close()

    def _callpush(self, cmd, cg, **args):
        # have to stream bundle to a temp file because we do not have
        # http 1.1 chunked transfer.

        types = self.capable(b'unbundle')
        try:
            types = types.split(b',')
        except AttributeError:
            # servers older than d1b16a746db6 will send 'unbundle' as a
            # boolean capability. They only support headerless/uncompressed
            # bundles.
            types = [b""]
        # NOTE(review): if no advertised type is in bundle2.bundletypes,
        # ``type`` is never assigned and the writebundle() call below
        # raises NameError — presumably servers always advertise at least
        # one supported type; confirm before relying on this path.
        for x in types:
            if x in bundle2.bundletypes:
                type = x
                break

        tempname = bundle2.writebundle(self.ui, cg, None, type)
        fp = httpconnection.httpsendfile(self.ui, tempname, b"rb")
        headers = {'Content-Type': 'application/mercurial-0.1'}

        try:
            r = self._call(cmd, data=fp, headers=headers, **args)
            vals = r.split(b'\n', 1)
            if len(vals) < 2:
                raise error.ResponseError(_(b"unexpected response:"), r)
            return vals
        except urlerr.httperror:
            # Catch and re-raise these so we don't try and treat them
            # like generic socket errors. They lack any values in
            # .args on Python 3 which breaks our socket.error block.
            raise
        except socket.error as err:
            if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
                raise error.Abort(_(b'push failed: %s') % err.args[1])
            raise error.Abort(err.args[1])
        finally:
            fp.close()
            os.unlink(tempname)

    def _calltwowaystream(self, cmd, fp, **args):
        """Stream ``fp`` to the server via a temp file; return the response."""
        filename = None
        try:
            # dump bundle to disk
            fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
            with os.fdopen(fd, "wb") as fh:
                d = fp.read(4096)
                while d:
                    fh.write(d)
                    d = fp.read(4096)
            # start http push
            with httpconnection.httpsendfile(self.ui, filename, b"rb") as fp_:
                headers = {'Content-Type': 'application/mercurial-0.1'}
                return self._callstream(cmd, data=fp_, headers=headers, **args)
        finally:
            if filename is not None:
                os.unlink(filename)

    def _callcompressable(self, cmd, **args):
        # Same as _callstream but permits zlib decompression of 0.1
        # responses (see parsev1commandresponse).
        return self._callstream(cmd, _compressible=True, **args)

    def _abort(self, exception):
        raise exception
526 526
527 527
class queuedcommandfuture(futures.Future):
    """Future for a queued command that submits the batch on ``result()``.

    Calling ``result()`` before the batch has been sent triggers
    submission via the owning peer executor, then resolves normally.
    """

    def result(self, timeout=None):
        if not self.done():
            self._peerexecutor.sendcommands()
            # sendcommands() restores the original __class__, so the
            # recursive call resolves through Future.result.
            return self.result(timeout)

        return futures.Future.result(self, timeout)
540 540
541 541
def performhandshake(ui, url, opener, requestbuilder):
    """Issue the version 1 handshake (the ``capabilities`` command).

    Returns a 2-tuple of (effective URL after any redirect, info dict)
    where the info dict contains ``b'v1capabilities'``: the set of
    capability tokens the server advertised.
    """
    # The handshake is a request to the capabilities command.

    caps = None

    def capable(x):
        # Capabilities are not known yet; makev1commandrequest must not
        # consult them during the handshake itself.
        raise error.ProgrammingError(b'should not be called')

    args = {}

    req, requrl, qs = makev1commandrequest(
        ui, requestbuilder, caps, capable, url, b'capabilities', args
    )
    resp = sendrequest(ui, opener, req)

    # The server may redirect us to the repo root, stripping the
    # ?cmd=capabilities query string from the URL. The server would likely
    # return HTML in this case and ``parsev1commandresponse()`` would raise.
    # We catch this special case and re-issue the capabilities request against
    # the new URL.
    #
    # We should ideally not do this, as a redirect that drops the query
    # string from the URL is arguably a server bug. (Garbage in, garbage out).
    # However, Mercurial clients for several years appeared to handle this
    # issue without behavior degradation. And according to issue 5860, it may
    # be a longstanding bug in some server implementations. So we allow a
    # redirect that drops the query string to "just work."
    try:
        respurl, ct, resp = parsev1commandresponse(
            ui, url, requrl, qs, resp, compressible=False
        )
    except RedirectedRepoError as e:
        req, requrl, qs = makev1commandrequest(
            ui, requestbuilder, caps, capable, e.respurl, b'capabilities', args
        )
        resp = sendrequest(ui, opener, req)
        respurl, ct, resp = parsev1commandresponse(
            ui, url, requrl, qs, resp, compressible=False
        )

    try:
        rawdata = resp.read()
    finally:
        resp.close()

    if not ct.startswith(b'application/mercurial-'):
        raise error.ProgrammingError(b'unexpected content-type: %s' % ct)

    # Capabilities are a space-delimited list of tokens.
    info = {b'v1capabilities': set(rawdata.split())}

    return respurl, info
593 593
594 594
def _make_peer(ui, path, opener=None, requestbuilder=urlreq.request):
    """Construct an appropriate HTTP peer instance.

    ``opener`` is an ``url.opener`` that should be used to establish
    connections, perform HTTP requests.

    ``requestbuilder`` is the type used for constructing HTTP requests.
    It exists as an argument so extensions can override the default.
    """
    unsupported = path.url.query or path.url.fragment
    if unsupported:
        raise error.Abort(
            _(b'unsupported URL component: "%s"') % unsupported
        )

    # urllib cannot handle URLs with embedded user or passwd.
    url, authinfo = path.url.authinfo()
    ui.debug(b'using %s\n' % url)

    opener = opener or urlmod.opener(ui, authinfo)

    respurl, info = performhandshake(ui, url, opener, requestbuilder)

    return httppeer(
        ui, path, respurl, opener, requestbuilder, info[b'v1capabilities']
    )
620 620
621 621
def make_peer(ui, path, create, intents=None, createopts=None):
    """Create an HTTP peer for ``path``, falling back to static-http.

    Creating new repositories over HTTP is not supported and aborts.
    """
    if create:
        raise error.Abort(_(b'cannot create new http repository'))
    try:
        if path.url.scheme == b'https' and not urlmod.has_https:
            raise error.Abort(
                _(b'Python support for SSL and HTTPS is not installed')
            )

        return _make_peer(ui, path)
    except error.RepoError as httpexception:
        try:
            # The URL may point at a statically served repository; retry
            # with the static-http scheme before giving up.
            r = statichttprepo.make_peer(ui, b"static-" + path.loc, create)
            ui.note(_(b'(falling back to static-http)\n'))
            return r
        except error.RepoError:
            raise httpexception  # use the original http RepoError instead
@@ -1,674 +1,674 b''
1 1 # sshpeer.py - ssh repository proxy class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import re
10 10 import uuid
11 11
12 12 from .i18n import _
13 13 from .pycompat import getattr
14 14 from . import (
15 15 error,
16 16 pycompat,
17 17 util,
18 18 wireprototypes,
19 19 wireprotov1peer,
20 20 wireprotov1server,
21 21 )
22 22 from .utils import (
23 23 procutil,
24 24 stringutil,
25 25 urlutil,
26 26 )
27 27
28 28
29 29 def _serverquote(s):
30 30 """quote a string for the remote shell ... which we assume is sh"""
31 31 if not s:
32 32 return s
33 33 if re.match(b'[a-zA-Z0-9@%_+=:,./-]*$', s):
34 34 return s
35 35 return b"'%s'" % s.replace(b"'", b"'\\''")
36 36
37 37
def _forwardoutput(ui, pipe, warn=False):
    """display all data currently available on pipe as remote output.

    This is non blocking."""
    if not pipe or pipe.closed:
        return
    data = procutil.readpipe(pipe)
    if not data:
        return
    # warn=True routes output through ui.warn so -q does not hide it.
    emit = ui.warn if warn else ui.status
    for line in data.splitlines():
        emit(_(b"remote: "), line, b'\n')
48 48
49 49
class doublepipe:
    """Operate a side-channel pipe in addition of a main one

    The side-channel pipe contains server output to be forwarded to the user
    input. The double pipe will behave as the "main" pipe, but will ensure the
    content of the "side" pipe is properly processed while we wait for blocking
    call on the "main" pipe.

    If large amounts of data are read from "main", the forward will cease after
    the first bytes start to appear. This simplifies the implementation
    without affecting actual output of sshpeer too much as we rarely issue
    large read for data not yet emitted by the server.

    The main pipe is expected to be a 'bufferedinputpipe' from the util module
    that handle all the os specific bits. This class lives in this module
    because it focus on behavior specific to the ssh protocol."""

    def __init__(self, ui, main, side):
        # ui: used only for forwarding "side" output to the user.
        # main: the data pipe this object proxies (read/write/close).
        # side: the stderr-like pipe drained whenever we touch "main".
        self._ui = ui
        self._main = main
        self._side = side

    def _wait(self):
        """wait until some data are available on main or side

        return a pair of boolean (ismainready, issideready)

        (This will only wait for data if the setup is supported by `util.poll`)
        """
        if (
            isinstance(self._main, util.bufferedinputpipe)
            and self._main.hasbuffer
        ):
            # Main has data. Assume side is worth poking at.
            return True, True

        fds = [self._main.fileno(), self._side.fileno()]
        try:
            act = util.poll(fds)
        except NotImplementedError:
            # non supported yet case, assume all have data.
            act = fds
        return (self._main.fileno() in act, self._side.fileno() in act)

    def write(self, data):
        # Writes go through _call so side output is forwarded while blocked.
        return self._call(b'write', data)

    def read(self, size):
        r = self._call(b'read', size)
        if size != 0 and not r:
            # We've observed a condition that indicates the
            # stdout closed unexpectedly. Check stderr one
            # more time and snag anything that's there before
            # letting anyone know the main part of the pipe
            # closed prematurely.
            _forwardoutput(self._ui, self._side)
        return r

    def unbufferedread(self, size):
        r = self._call(b'unbufferedread', size)
        if size != 0 and not r:
            # We've observed a condition that indicates the
            # stdout closed unexpectedly. Check stderr one
            # more time and snag anything that's there before
            # letting anyone know the main part of the pipe
            # closed prematurely.
            _forwardoutput(self._ui, self._side)
        return r

    def readline(self):
        return self._call(b'readline')

    def _call(self, methname, data=None):
        """call <methname> on "main", forward output of "side" while blocking"""
        # data can be '' or 0
        if (data is not None and not data) or self._main.closed:
            _forwardoutput(self._ui, self._side)
            return b''
        while True:
            # Drain the side channel before performing the main I/O so
            # remote diagnostics are shown as early as possible.
            mainready, sideready = self._wait()
            if sideready:
                _forwardoutput(self._ui, self._side)
            if mainready:
                meth = getattr(self._main, methname)
                if data is None:
                    return meth()
                else:
                    return meth(data)

    def close(self):
        # Only the main pipe is closed here; the side pipe is managed by
        # the owning peer (see _cleanuppipes).
        return self._main.close()

    @property
    def closed(self):
        return self._main.closed

    def flush(self):
        return self._main.flush()
148 148
149 149
def _cleanuppipes(ui, pipei, pipeo, pipee, warn):
    """Clean up pipes used by an SSH connection."""
    closed_any = False

    # Close the data pipes first (output, then input).
    for p in (pipeo, pipei):
        if p and not p.closed:
            closed_any = True
            p.close()

    if pipee and not pipee.closed:
        closed_any = True
        # Drain any remaining stderr output so remote messages are not lost.
        try:
            for line in pipee:
                ui.status(_(b'remote: '), line)
        except (IOError, ValueError):
            pass

        pipee.close()

    if closed_any and warn is not None:
        # Encourage explicit close of sshpeers. Closing via __del__ is
        # not very predictable when exceptions are thrown, which has led
        # to deadlocks due to a peer get gc'ed in a fork
        # We add our own stack trace, because the stacktrace when called
        # from __del__ is useless.
        ui.develwarn(b'missing close on SSH connection created at:\n%s' % warn)
178 178
179 179
def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
    """Create an SSH connection to a server.

    Returns a tuple of (process, stdin, stdout, stderr) for the
    spawned process.
    """
    serve_cmd = b'%s -R %s serve --stdio' % (
        _serverquote(remotecmd),
        _serverquote(path),
    )
    cmd = b'%s %s %s' % (sshcmd, args, procutil.shellquote(serve_cmd))

    ui.debug(b'running %s\n' % cmd)

    # no buffer allow the use of 'select'
    # feel free to remove buffering and select usage when we ultimately
    # move to threading.
    stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv)

    return proc, stdin, stdout, stderr
203 203
204 204
def _clientcapabilities():
    """Return list of capabilities of this client.

    Returns a list of capabilities that are supported by this client.
    """
    caps = {b'partial-pull'}
    # Advertise every compression engine usable on the wire by a client.
    engines = util.compengines.supportedwireengines(util.CLIENTROLE)
    names = b','.join(e.wireprotosupport().name for e in engines)
    caps.add(b'comp=%s' % names)
    return caps
217 217
218 218
def _performhandshake(ui, stdin, stdout, stderr):
    """Perform the initial wire-protocol handshake over the SSH pipes.

    Returns a ``(protoname, capabilities)`` pair once the remote is
    identified as a Mercurial server; raises ``error.RepoError`` (via
    ``badresponse``) when no suitable response is seen.
    """

    def badresponse():
        # Flush any output on stderr. In general, the stderr contains errors
        # from the remote (ssh errors, some hg errors), and status indications
        # (like "adding changes"), with no current way to tell them apart.
        # Here we failed so early that it's almost certainly only errors, so
        # use warn=True so -q doesn't hide them.
        _forwardoutput(ui, stderr, warn=True)

        msg = _(b'no suitable response from remote hg')
        hint = ui.config(b'ui', b'ssherrorhint')
        raise error.RepoError(msg, hint=hint)

    # The handshake consists of sending wire protocol commands in reverse
    # order of protocol implementation and then sniffing for a response
    # to one of them.
    #
    # Those commands (from oldest to newest) are:
    #
    # ``between``
    #   Asks for the set of revisions between a pair of revisions. Command
    #   present in all Mercurial server implementations.
    #
    # ``hello``
    #   Instructs the server to advertise its capabilities. Introduced in
    #   Mercurial 0.9.1.
    #
    # ``upgrade``
    #   Requests upgrade from default transport protocol version 1 to
    #   a newer version. Introduced in Mercurial 4.6 as an experimental
    #   feature.
    #
    # The ``between`` command is issued with a request for the null
    # range. If the remote is a Mercurial server, this request will
    # generate a specific response: ``1\n\n``. This represents the
    # wire protocol encoded value for ``\n``. We look for ``1\n\n``
    # in the output stream and know this is the response to ``between``
    # and we're at the end of our handshake reply.
    #
    # The response to the ``hello`` command will be a line with the
    # length of the value returned by that command followed by that
    # value. If the server doesn't support ``hello`` (which should be
    # rare), that line will be ``0\n``. Otherwise, the value will contain
    # RFC 822 like lines. Of these, the ``capabilities:`` line contains
    # the capabilities of the server.
    #
    # The ``upgrade`` command isn't really a command in the traditional
    # sense of version 1 of the transport because it isn't using the
    # proper mechanism for formatting insteads: instead, it just encodes
    # arguments on the line, delimited by spaces.
    #
    # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``.
    # If the server doesn't support protocol upgrades, it will reply to
    # this line with ``0\n``. Otherwise, it emits an
    # ``upgraded <token> <protocol>`` line to both stdout and stderr.
    # Content immediately following this line describes additional
    # protocol and server state.
    #
    # In addition to the responses to our command requests, the server
    # may emit "banner" output on stdout. SSH servers are allowed to
    # print messages to stdout on login. Issuing commands on connection
    # allows us to flush this banner output from the server by scanning
    # for output to our well-known ``between`` command. Of course, if
    # the banner contains ``1\n\n``, this will throw off our detection.

    requestlog = ui.configbool(b'devel', b'debug.peer-request')

    # Generate a random token to help identify responses to version 2
    # upgrade request.
    token = pycompat.sysbytes(str(uuid.uuid4()))

    try:
        pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40)
        handshake = [
            b'hello\n',
            b'between\n',
            b'pairs %d\n' % len(pairsarg),
            pairsarg,
        ]

        if requestlog:
            ui.debug(b'devel-peer-request: hello+between\n')
            ui.debug(b'devel-peer-request:   pairs: %d bytes\n' % len(pairsarg))
        ui.debug(b'sending hello command\n')
        ui.debug(b'sending between command\n')

        stdin.write(b''.join(handshake))
        stdin.flush()
    except IOError:
        badresponse()

    # Assume version 1 of wire protocol by default.
    protoname = wireprototypes.SSHV1
    reupgraded = re.compile(b'^upgraded %s (.*)$' % stringutil.reescape(token))

    lines = [b'', b'dummy']
    max_noise = 500
    while lines[-1] and max_noise:
        try:
            l = stdout.readline()
            _forwardoutput(ui, stderr, warn=True)

            # Look for reply to protocol upgrade request. It has a token
            # in it, so there should be no false positives.
            m = reupgraded.match(l)
            if m:
                protoname = m.group(1)
                ui.debug(b'protocol upgraded to %s\n' % protoname)
                # If an upgrade was handled, the ``hello`` and ``between``
                # requests are ignored. The next output belongs to the
                # protocol, so stop scanning lines.
                break

            # Otherwise it could be a banner, ``0\n`` response if server
            # doesn't support upgrade.

            if lines[-1] == b'1\n' and l == b'\n':
                break
            if l:
                ui.debug(b'remote: ', l)
            lines.append(l)
            max_noise -= 1
        except IOError:
            badresponse()
    else:
        badresponse()

    caps = set()

    # For version 1, we should see a ``capabilities`` line in response to the
    # ``hello`` command.
    if protoname == wireprototypes.SSHV1:
        for l in reversed(lines):
            # Look for response to ``hello`` command. Scan from the back so
            # we don't misinterpret banner output as the command reply.
            if l.startswith(b'capabilities:'):
                caps.update(l[:-1].split(b':')[1].split())
                break

    # Error if we couldn't find capabilities, this means:
    #
    # 1. Remote isn't a Mercurial server
    # 2. Remote is a <0.9.1 Mercurial server
    # 3. Remote is a future Mercurial server that dropped ``hello``
    #    and other attempted handshake mechanisms.
    if not caps:
        badresponse()

    # Flush any output on stderr before proceeding.
    _forwardoutput(ui, stderr, warn=True)

    return protoname, caps
371 371
372 372
class sshv1peer(wireprotov1peer.wirepeer):
    """A peer that speaks version 1 of the wire protocol over SSH pipes."""

    def __init__(
        self, ui, path, proc, stdin, stdout, stderr, caps, autoreadstderr=True
    ):
        """Create a peer from an existing SSH connection.

        ``proc`` is a handle on the underlying SSH process.
        ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio
        pipes for that process.
        ``caps`` is a set of capabilities supported by the remote.
        ``autoreadstderr`` denotes whether to automatically read from
        stderr and to forward its output.
        """
        super().__init__(ui, path=path)
        # self._subprocess is unused. Keeping a handle on the process
        # holds a reference and prevents it from being garbage collected.
        self._subprocess = proc

        # And we hook up our "doublepipe" wrapper to allow querying
        # stderr any time we perform I/O.
        if autoreadstderr:
            stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr)
            stdin = doublepipe(ui, stdin, stderr)

        self._pipeo = stdin
        self._pipei = stdout
        self._pipee = stderr
        self._caps = caps
        self._autoreadstderr = autoreadstderr
        # Captured creation stack, used to produce a useful develwarn if
        # the peer is cleaned up via __del__ instead of an explicit close.
        self._initstack = b''.join(util.getstackframes(1))

    # Commands that have a "framed" response where the first line of the
    # response contains the length of that response.
    _FRAMED_COMMANDS = {
        b'batch',
    }

    # Begin of ipeerconnection interface.

    def url(self):
        return self.path.loc

    def local(self):
        return None

    def canpush(self):
        return True

    def close(self):
        self._cleanup()

    # End of ipeerconnection interface.

    # Begin of ipeercommands interface.

    def capabilities(self):
        return self._caps

    # End of ipeercommands interface.

    def _readerr(self):
        # Forward any pending remote stderr output to the user.
        _forwardoutput(self.ui, self._pipee)

    def _abort(self, exception):
        # Tear down the connection before propagating the failure.
        self._cleanup()
        raise exception

    def _cleanup(self, warn=None):
        _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee, warn=warn)

    def __del__(self):
        self._cleanup(warn=self._initstack)

    def _sendrequest(self, cmd, args, framed=False):
        """Send ``cmd`` with ``args`` and return a stream for the response.

        When ``framed`` is true, the response stream is capped to the
        length announced by the server's first response line.
        """
        if self.ui.debugflag and self.ui.configbool(
            b'devel', b'debug.peer-request'
        ):
            dbg = self.ui.debug
            line = b'devel-peer-request: %s\n'
            dbg(line % cmd)
            for key, value in sorted(args.items()):
                if not isinstance(value, dict):
                    dbg(line % b'  %s: %d bytes' % (key, len(value)))
                else:
                    for dk, dv in sorted(value.items()):
                        dbg(line % b'  %s-%s: %d' % (key, dk, len(dv)))
        self.ui.debug(b"sending %s command\n" % cmd)
        self._pipeo.write(b"%s\n" % cmd)
        _func, names = wireprotov1server.commands[cmd]
        keys = names.split()
        wireargs = {}
        for k in keys:
            if k == b'*':
                wireargs[b'*'] = args
                break
            else:
                wireargs[k] = args[k]
                del args[k]
        # Arguments are sent as "<name> <length>\n<value>" records; dict
        # values nest one more level of the same encoding.
        for k, v in sorted(wireargs.items()):
            self._pipeo.write(b"%s %d\n" % (k, len(v)))
            if isinstance(v, dict):
                for dk, dv in v.items():
                    self._pipeo.write(b"%s %d\n" % (dk, len(dv)))
                    self._pipeo.write(dv)
            else:
                self._pipeo.write(v)
        self._pipeo.flush()

        # We know exactly how many bytes are in the response. So return a proxy
        # around the raw output stream that allows reading exactly this many
        # bytes. Callers then can read() without fear of overrunning the
        # response.
        if framed:
            amount = self._getamount()
            return util.cappedreader(self._pipei, amount)

        return self._pipei

    def _callstream(self, cmd, **args):
        args = pycompat.byteskwargs(args)
        return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)

    def _callcompressable(self, cmd, **args):
        args = pycompat.byteskwargs(args)
        return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)

    def _call(self, cmd, **args):
        args = pycompat.byteskwargs(args)
        return self._sendrequest(cmd, args, framed=True).read()

    def _callpush(self, cmd, fp, **args):
        # The server responds with an empty frame if the client should
        # continue submitting the payload.
        r = self._call(cmd, **args)
        if r:
            return b'', r

        # The payload consists of frames with content followed by an empty
        # frame.
        for d in iter(lambda: fp.read(4096), b''):
            self._writeframed(d)
        self._writeframed(b"", flush=True)

        # In case of success, there is an empty frame and a frame containing
        # the integer result (as a string).
        # In case of error, there is a non-empty frame containing the error.
        r = self._readframed()
        if r:
            return b'', r
        return self._readframed(), b''

    def _calltwowaystream(self, cmd, fp, **args):
        # The server responds with an empty frame if the client should
        # continue submitting the payload.
        r = self._call(cmd, **args)
        if r:
            # XXX needs to be made better
            raise error.Abort(_(b'unexpected remote reply: %s') % r)

        # The payload consists of frames with content followed by an empty
        # frame.
        for d in iter(lambda: fp.read(4096), b''):
            self._writeframed(d)
        self._writeframed(b"", flush=True)

        return self._pipei

    def _getamount(self):
        """Read and return the integer length line preceding a framed reply.

        A bare newline signals an out-of-band error from the server.
        """
        l = self._pipei.readline()
        if l == b'\n':
            if self._autoreadstderr:
                self._readerr()
            msg = _(b'check previous remote output')
            self._abort(error.OutOfBandError(hint=msg))
        if self._autoreadstderr:
            self._readerr()
        try:
            return int(l)
        except ValueError:
            self._abort(error.ResponseError(_(b"unexpected response:"), l))

    def _readframed(self):
        # Read one length-prefixed frame; empty frame reads as b''.
        size = self._getamount()
        if not size:
            return b''

        return self._pipei.read(size)

    def _writeframed(self, data, flush=False):
        # Write one length-prefixed frame, optionally flushing the pipe.
        self._pipeo.write(b"%d\n" % len(data))
        if data:
            self._pipeo.write(data)
        if flush:
            self._pipeo.flush()
        if self._autoreadstderr:
            self._readerr()
570 570
def _make_peer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
    """Make a peer instance from existing pipes.

    ``path`` and ``proc`` are stored on the eventual peer instance and may
    not be used for anything meaningful.

    ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the
    SSH server's stdio handles.

    This function is factored out to allow creating peers that don't
    actually spawn a new process. It is useful for starting SSH protocol
    servers and clients via non-standard means, which can be useful for
    testing.
    """
    try:
        protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
    except Exception:
        _cleanuppipes(ui, stdout, stdin, stderr, warn=None)
        raise

    # Only protocol version 1 is supported here; anything else is fatal.
    if protoname != wireprototypes.SSHV1:
        _cleanuppipes(ui, stdout, stdin, stderr, warn=None)
        raise error.RepoError(
            _(b'unknown version of SSH protocol: %s') % protoname
        )

    return sshv1peer(
        ui,
        path,
        proc,
        stdin,
        stdout,
        stderr,
        caps,
        autoreadstderr=autoreadstderr,
    )
607 607
608 608
def make_peer(ui, path, create, intents=None, createopts=None):
    """Create an SSH peer.

    The returned object conforms to the ``wireprotov1peer.wirepeer`` interface.
    """
    u = urlutil.url(path.loc, parsequery=False, parsefragment=False)
    if u.scheme != b'ssh' or not u.host or u.path is None:
        raise error.RepoError(_(b"couldn't parse location %s") % path)

    urlutil.checksafessh(path.loc)

    if u.passwd is not None:
        raise error.RepoError(_(b'password in URL not supported'))

    sshcmd = ui.config(b'ui', b'ssh')
    remotecmd = ui.config(b'ui', b'remotecmd')
    sshenv = procutil.shellenviron(dict(ui.configitems(b'sshenv')))
    remotepath = u.path or b'.'

    args = procutil.sshargs(sshcmd, u.host, u.user, u.port)

    if create:
        # We /could/ do this, but only if the remote init command knows how to
        # handle them. We don't yet make any assumptions about that. And without
        # querying the remote, there's no way of knowing if the remote even
        # supports said requested feature.
        if createopts:
            raise error.RepoError(
                _(
                    b'cannot create remote SSH repositories '
                    b'with extra options'
                )
            )

        initcmd = b'%s %s %s' % (
            sshcmd,
            args,
            procutil.shellquote(
                b'%s init %s'
                % (_serverquote(remotecmd), _serverquote(remotepath))
            ),
        )
        ui.debug(b'running %s\n' % initcmd)
        if ui.system(initcmd, blockedtag=b'sshpeer', environ=sshenv) != 0:
            raise error.RepoError(_(b'could not create remote repo'))

    proc, stdin, stdout, stderr = _makeconnection(
        ui, sshcmd, args, remotecmd, remotepath, sshenv
    )

    peer = _make_peer(ui, path, proc, stdin, stdout, stderr)

    # Finally, if supported by the server, notify it about our own
    # capabilities.
    if b'protocaps' in peer.capabilities():
        try:
            peer._call(
                b"protocaps", caps=b' '.join(sorted(_clientcapabilities()))
            )
        except IOError:
            peer._cleanup()
            raise error.RepoError(_(b'capability exchange failed'))

    return peer
General Comments 0
You need to be logged in to leave comments. Login now