##// END OF EJS Templates
debugtagscache: verify that filenode is correct...
Pulkit Goyal -
r47401:e4e971ab default
parent child Browse files
Show More
@@ -1,4755 +1,4758
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import codecs
11 11 import collections
12 12 import difflib
13 13 import errno
14 14 import glob
15 15 import operator
16 16 import os
17 17 import platform
18 18 import random
19 19 import re
20 20 import socket
21 21 import ssl
22 22 import stat
23 23 import string
24 24 import subprocess
25 25 import sys
26 26 import time
27 27
28 28 from .i18n import _
29 29 from .node import (
30 30 bin,
31 31 hex,
32 32 nullid,
33 33 nullrev,
34 34 short,
35 35 )
36 36 from .pycompat import (
37 37 getattr,
38 38 open,
39 39 )
40 40 from . import (
41 41 bundle2,
42 42 bundlerepo,
43 43 changegroup,
44 44 cmdutil,
45 45 color,
46 46 context,
47 47 copies,
48 48 dagparser,
49 49 encoding,
50 50 error,
51 51 exchange,
52 52 extensions,
53 53 filemerge,
54 54 filesetlang,
55 55 formatter,
56 56 hg,
57 57 httppeer,
58 58 localrepo,
59 59 lock as lockmod,
60 60 logcmdutil,
61 61 mergestate as mergestatemod,
62 62 metadata,
63 63 obsolete,
64 64 obsutil,
65 65 pathutil,
66 66 phases,
67 67 policy,
68 68 pvec,
69 69 pycompat,
70 70 registrar,
71 71 repair,
72 72 repoview,
73 73 revlog,
74 74 revset,
75 75 revsetlang,
76 76 scmutil,
77 77 setdiscovery,
78 78 simplemerge,
79 79 sshpeer,
80 80 sslutil,
81 81 streamclone,
82 82 strip,
83 83 tags as tagsmod,
84 84 templater,
85 85 treediscovery,
86 86 upgrade,
87 87 url as urlmod,
88 88 util,
89 89 vfs as vfsmod,
90 90 wireprotoframing,
91 91 wireprotoserver,
92 92 wireprotov2peer,
93 93 )
94 94 from .utils import (
95 95 cborutil,
96 96 compression,
97 97 dateutil,
98 98 procutil,
99 99 stringutil,
100 100 )
101 101
102 102 from .revlogutils import (
103 103 deltas as deltautil,
104 104 nodemap,
105 105 sidedata,
106 106 )
107 107
108 108 release = lockmod.release
109 109
110 110 table = {}
111 111 table.update(strip.command._table)
112 112 command = registrar.command(table)
113 113
114 114
@command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    nargs = len(args)
    if nargs == 3:
        # explicit index file given: open it as a standalone revlog
        index, rev1, rev2 = args
        store = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = store.lookup
    elif nargs == 2:
        # no index file: fall back to the current repository's changelog
        if not repo:
            raise error.Abort(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        rev1, rev2 = args
        store = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_(b'either two or three arguments required'))
    ancestornode = store.ancestor(lookup(rev1), lookup(rev2))
    ui.write(b'%d:%s\n' % (store.rev(ancestornode), hex(ancestornode)))
134 134
135 135
@command(b'debugantivirusrunning', [])
def debugantivirusrunning(ui, repo):
    """attempt to trigger an antivirus scanner to see if one is active"""
    # NOTE: vfs paths are bytes throughout this codebase; the previous native
    # str literal would be joined with the bytes vfs base and fail on
    # Python 3, so the filename must be a bytes literal.
    with repo.cachevfs.open(b'eicar-test-file.com', b'wb') as f:
        f.write(
            util.b85decode(
                # This is a base85-armored version of the EICAR test file. See
                # https://en.wikipedia.org/wiki/EICAR_test_file for details.
                b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
                b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
            )
        )
    # Give an AV engine time to scan the file.
    time.sleep(2)
    util.unlink(repo.cachevfs.join(b'eicar-test-file.com'))
151 151
152 152
@command(b'debugapplystreamclonebundle', [], b'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    # open the bundle path (local file or URL), parse its header, apply it
    fh = hg.openpath(ui, fname)
    bundle = exchange.readbundle(ui, fh, fname)
    bundle.apply(repo)
159 159
160 160
@command(
    b'debugbuilddag',
    [
        (
            b'm',
            b'mergeable-file',
            None,
            _(b'add single file mergeable changes'),
        ),
        (
            b'o',
            b'overwritten-file',
            None,
            _(b'add single file all revs overwrite'),
        ),
        (b'n', b'new-file', None, _(b'add new file at each rev')),
    ],
    _(b'[OPTION]... [TEXT]'),
)
def debugbuilddag(
    ui,
    repo,
    text=None,
    mergeable_file=False,
    overwritten_file=False,
    new_file=False,
):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_(b"reading DAG from stdin\n"))
        text = ui.fin.read()

    # this command only makes sense starting from an empty repository
    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_(b'repository is not empty'))

    # determine number of revs in DAG (first parse pass; used for the
    # progress bar total and to size the initial mergeable-file content)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == b'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [
            b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
        ]
        initialmergedlines.append(b"")

    tags = []
    progress = ui.makeprogress(
        _(b'building'), unit=_(b'revisions'), total=total
    )
    with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
        # state carried across DAG events:
        #   at       - rev id of the last node committed (-1 before any)
        #   atbranch - named branch for subsequently created nodes
        #   nodeids  - node hash per rev id, for resolving backrefs
        at = -1
        atbranch = b'default'
        nodeids = []
        id = 0
        progress.update(id)
        # second parse pass: actually commit one changeset per 'n' event
        for type, data in dagparser.parsedag(text):
            if type == b'n':
                ui.note((b'node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = b"mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # merge node: three-way merge the file from both
                        # parents against their common ancestor
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [
                            x[fn].data() for x in (pa, p1, p2)
                        ]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append(b"")
                    elif at > 0:
                        ml = p1[fn].data().split(b"\n")
                    else:
                        ml = initialmergedlines
                    # tag this rev's slice of the file so each rev changes
                    # a distinct line (keeps the file mergeable)
                    ml[id * linesperrev] += b" r%i" % id
                    mergedtext = b"\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    # single file rewritten wholesale by every rev
                    fn = b"of"
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id

                if new_file:
                    # a brand-new file per rev; on merges, also carry over
                    # the other parent's "nf*" files so they are not lost
                    fn = b"nf%i" % id
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith(b"nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    # memctx callback: serve file content prepared above
                    if path in filecontent:
                        return context.memfilectx(
                            repo, cx, path, filecontent[path]
                        )
                    return None

                # resolve parent rev ids to node hashes (None == null parent)
                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(
                    repo,
                    pars,
                    b"r%i" % id,
                    files,
                    fctxfn,
                    date=(id, 0),
                    user=b"debugbuilddag",
                    extra={b'branch': atbranch},
                )
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == b'l':
                # local tag event: remember it for .hg/localtags below
                id, name = data
                ui.note((b'tag %s\n' % name))
                tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == b'a':
                # branch annotation event: affects subsequent commits
                ui.note((b'branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write(b"localtags", b"".join(tags))
336 336
337 337
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """dump the contents of changegroup ``gen`` to the ui.

    With ``all`` set, every delta of every section (changelog, manifest,
    filelogs) is listed with its parents, linkrev cset and delta base;
    otherwise only changelog node ids are printed.  ``indent`` prefixes
    each output line (used when nested inside bundle2 part output).
    """
    indent_string = b' ' * indent
    if all:
        ui.writenoi18n(
            b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
            % indent_string
        )

        def showchunks(named):
            # print a section title, then one line per delta in the section
            ui.write(b"\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write(
                    b"%s%s %s %s %s %s %d\n"
                    % (
                        indent_string,
                        hex(node),
                        hex(p1),
                        hex(p2),
                        hex(cs),
                        hex(deltabase),
                        len(delta),
                    )
                )

        # the unbundler is a stream: headers and sections must be consumed
        # strictly in order (changelog, manifest, then filelogs)
        gen.changelogheader()
        showchunks(b"changelog")
        gen.manifestheader()
        showchunks(b"manifest")
        # filelog sections continue until an empty header terminates them
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata[b'filename']
            showchunks(fname)
    else:
        # bundle2 payloads carry multiple parts; this short listing only
        # supports plain changegroups
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_(b'use debugbundle2 for this file'))
        gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write(b"%s%s\n" % (indent_string, hex(node)))
377 377
378 378
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    blob = part.read()
    pad = b' ' * indent
    try:
        version, markers = obsolete._readmarkers(blob)
    except error.UnknownVersion as exc:
        # encoding version not understood; report it instead of decoding
        ui.write(
            b"%sunsupported version: %s (%d bytes)\n"
            % (pad, exc.version, len(blob))
        )
    else:
        ui.write(b"%sversion: %d (%d bytes)\n" % (pad, version, len(blob)))
        fm = ui.formatter(b'debugobsolete', opts)
        for rawmarker in sorted(markers):
            fm.startitem()
            fm.plain(pad)
            cmdutil.showmarker(fm, obsutil.marker(None, rawmarker))
        fm.end()
401 401
402 402
def _debugphaseheads(ui, data, indent=0):
    """display version and markers contained in 'data'"""
    pad = b' ' * indent
    headsbyphase = phases.binarydecode(data)
    # one line per head, grouped by phase
    for phase in phases.allphases:
        phasename = phases.phasenames[phase]
        for head in headsbyphase[phase]:
            ui.write(pad)
            ui.write(b'%s %s\n' % (hex(head), phasename))
411 411
412 412
def _quasirepr(thing):
    """render ``thing`` as bytes, with deterministic key order for mappings."""
    if not isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        return pycompat.bytestr(repr(thing))
    pairs = (b'%s: %s' % (key, thing[key]) for key in sorted(thing))
    return b'{%s}' % b', '.join(pairs)
419 419
420 420
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_(b'not a bundle2 file'))
    ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
    wantedtypes = opts.get('part_type', [])
    for part in gen.iterparts():
        # honor --part-type filtering when given
        if wantedtypes and part.type not in wantedtypes:
            continue
        header = b'%s -- %s (mandatory: %r)\n'
        ui.write((header % (part.type, _quasirepr(part.params), part.mandatory)))
        # a part has exactly one type, so the detail dumps are exclusive
        if part.type == b'changegroup':
            version = part.params.get(b'version', b'01')
            cg = changegroup.getunbundler(version, part, b'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        elif part.type == b'obsmarkers' and not ui.quiet:
            _debugobsmarkers(ui, part, indent=4, **opts)
        elif part.type == b'phase-heads' and not ui.quiet:
            _debugphaseheads(ui, part, indent=4)
443 443
444 444
@command(
    b'debugbundle',
    [
        (b'a', b'all', None, _(b'show all details')),
        (b'', b'part-type', [], _(b'show only the named part type')),
        (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
    ],
    _(b'FILE'),
    norepo=True,
)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            # only the bundlespec was requested; skip content listing
            ui.write(b'%s\n' % exchange.getbundlespec(ui, f))
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        # dispatch on bundle format: bundle2 vs plain changegroup
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
467 467
468 468
@command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    # top-level wire protocol capabilities
    ui.writenoi18n(b'Main capabilities:\n')
    for cap in sorted(peer.capabilities()):
        ui.write(b' %s\n' % cap)
    # bundle2 sub-capabilities, if the peer advertises any
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.writenoi18n(b'Bundle2 capabilities:\n')
        for key, values in sorted(pycompat.iteritems(b2caps)):
            ui.write(b' %s\n' % key)
            for value in values:
                ui.write(b' %s\n' % value)
485 485
486 486
@command(
    b'debugchangedfiles',
    [
        (
            b'',
            b'compute',
            False,
            b"compute information instead of reading it from storage",
        ),
    ],
    b'REV',
)
def debugchangedfiles(ui, repo, rev, **opts):
    """list the stored files changes for a revision"""
    ctx = scmutil.revsingle(repo, rev, None)

    if opts['compute']:
        # recompute from the changeset itself
        files = metadata.compute_all_files_changes(ctx)
    else:
        # read the pre-computed block from sidedata storage, if present
        files = None
        sd = repo.changelog.sidedata(ctx.rev())
        files_block = sd.get(sidedata.SD_FILES)
        if files_block is not None:
            files = metadata.decode_files_sidedata(sd)
    if files is None:
        return

    template = b"%-8s %2s: %s, %s;\n"
    for f in sorted(files.touched):
        # pick the most specific category; "touched" is the catch-all
        action = b"touched"
        for category, label in (
            (files.added, b"added"),
            (files.removed, b"removed"),
            (files.merged, b"merged"),
            (files.salvaged, b"salvaged"),
        ):
            if f in category:
                action = label
                break

        copy_parent = b""
        copy_source = b""
        if f in files.copied_from_p1:
            copy_parent = b"p1"
            copy_source = files.copied_from_p1[f]
        elif f in files.copied_from_p2:
            copy_parent = b"p2"
            copy_source = files.copied_from_p2[f]

        ui.write(template % (action, copy_parent, f, copy_source))
536 536
537 537
@command(b'debugcheckstate', [], b'')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    manifest1 = repo[parent1].manifest()
    manifest2 = repo[parent2].manifest()
    dirstate = repo.dirstate
    errors = 0
    # pass 1: every dirstate entry must be consistent with the manifests
    for path in dirstate:
        state = dirstate[path]
        if state in b"nr" and path not in manifest1:
            ui.warn(_(b"%s in state %s, but not in manifest1\n") % (path, state))
            errors += 1
        if state in b"a" and path in manifest1:
            ui.warn(_(b"%s in state %s, but also in manifest1\n") % (path, state))
            errors += 1
        if state in b"m" and path not in manifest1 and path not in manifest2:
            ui.warn(
                _(b"%s in state %s, but not in either manifest\n")
                % (path, state)
            )
            errors += 1
    # pass 2: every first-parent manifest entry must be tracked
    for path in manifest1:
        state = dirstate[path]
        if state not in b"nrm":
            ui.warn(
                _(b"%s in manifest1, but listed as state %s") % (path, state)
            )
            errors += 1
    if errors:
        errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
566 566
567 567
@command(
    b'debugcolor',
    [(b'', b'style', None, _(b'show all configured styles'))],
    b'hg debugcolor',
)
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
    # --style lists configured styles; default lists raw color names
    if opts.get('style'):
        return _debugdisplaystyle(ui)
    return _debugdisplaycolor(ui)
580 580
581 581
def _debugdisplaycolor(ui):
    """print every color/effect name known to the active mode, self-labeled."""
    ui = ui.copy()
    # replace configured styles with identity labels so each name is
    # rendered in its own color
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for name, _value in ui.configitems(b'color'):
            if name.startswith(b'color.'):
                ui._styles[name] = name[6:]
            elif name.startswith(b'terminfo.'):
                ui._styles[name] = name[9:]
    ui.write(_(b'available colors:\n'))

    # sort label with a '_' after the other to group '_background' entry.
    def sortkey(item):
        return (b'_' in item[0], item[0], item[1])

    for colorname, label in sorted(ui._styles.items(), key=sortkey):
        ui.write(b'%s\n' % colorname, label=label)
598 598
599 599
def _debugdisplaystyle(ui):
    """print each configured style label together with its rendered effects."""
    ui.write(_(b'available style:\n'))
    if not ui._styles:
        return
    # pad so the effect lists line up in a column
    longest = max(len(name) for name in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write(b'%s' % label, label=label)
        if effects:
            # 50
            padding = b' ' * (max(0, longest - len(label)))
            rendered = [ui.label(e, e) for e in effects.split()]
            ui.write(b': ')
            ui.write(padding)
            ui.write(b', '.join(rendered))
        ui.write(b'\n')
613 613
614 614
@command(b'debugcreatestreamclonebundle', [], b'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(
            _(
                b'(warning: stream clone bundle will contain secret '
                b'revisions)\n'
            )
        )

    requirements, chunks = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, chunks, fname)

    reqstr = b', '.join(sorted(requirements))
    ui.write(_(b'bundle requirements: %s\n') % reqstr)
636 636
637 637
@command(
    b'debugdag',
    [
        (b't', b'tags', None, _(b'use tags as labels')),
        (b'b', b'branches', None, _(b'annotate with branch names')),
        (b'', b'dots', None, _(b'use dots for runs')),
        (b's', b'spaces', None, _(b'separate elements by spaces')),
    ],
    _(b'[OPTION]... [FILE [REV]...]'),
    optionalrepo=True,
)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # standalone index file: emit its DAG, labeling any listed revs
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
        revs = {int(r) for r in revs}

        def events():
            # yield 'n' (node) events; null (-1) parents are dropped
            for r in rlog:
                yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
                if r in revs:
                    # label explicitly requested revisions as rN
                    yield b'l', (r, b"r%i" % r)

    elif repo:
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            # map rev -> list of tag names for inline 'l' (label) events
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)

        def events():
            b = b"default"
            for r in cl:
                if branches:
                    # emit an 'a' (annotation) event whenever the named
                    # branch changes between consecutive revisions
                    newb = cl.read(cl.node(r))[5][b'branch']
                    if newb != b:
                        yield b'a', newb
                        b = newb
                yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield b'l', (r, l)

    else:
        raise error.Abort(_(b'need repo for changelog dag'))

    # serialize the event stream back into dagparser text
    for line in dagparser.dagtextlines(
        events(),
        addspaces=spaces,
        wraplabels=True,
        wrapannotations=True,
        wrapnonlinear=dots,
        usedots=dots,
        maxlinewidth=70,
    ):
        ui.write(line)
    ui.write(b"\n")
707 707
708 708
@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    wholestore = any(
        opts.get(flag) for flag in (b'changelog', b'manifest', b'dir')
    )
    if wholestore:
        # with -c/-m/--dir the sole positional argument is the revision
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    store = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    try:
        ui.write(store.rawdata(store.lookup(rev)))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
724 724
725 725
@command(
    b'debugdate',
    [(b'e', b'extended', None, _(b'try extended date formats'))],
    _(b'[-e] DATE [RANGE]'),
    norepo=True,
    optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        parsed = dateutil.parsedate(date, dateutil.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    # parsed is a (unixtime, offset) pair
    ui.writenoi18n(b"internal: %d %d\n" % parsed)
    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(parsed))
    if range:
        matcher = dateutil.matchdate(range)
        ui.writenoi18n(b"match: %s\n" % matcher(parsed[0]))
744 744
745 745
@command(
    b'debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
      (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
      of this revision
    :``extradist``: total size of revisions not part of this delta chain from
      base of delta chain to end of this revision; a measurement
      of how much extra data we need to read/seek across to read
      the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
      how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
      (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        # per-rev summary derived from the index entry; fields used here:
        # e[1] compressed size, e[2] uncompressed size, e[3] delta base rev,
        # e[5]/e[6] parent revs (compared against the base to classify)
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # with generaldelta the base can be any rev; classify it
            if e[3] == e[5]:
                deltatype = b'p1'
            elif e[3] == e[6]:
                deltatype = b'p2'
            elif e[3] == rev - 1:
                deltatype = b'prev'
            elif e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'other'
        else:
            # without generaldelta, deltas are always against the previous
            # rev, unless the rev is its own base (full snapshot)
            if e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'prev'

        # chain of revs that must be combined to reconstruct this rev
        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter(b'debugdeltachain', opts)

    fm.plain(
        b' rev chain# chainlen prev delta '
        b'size rawsize chainsize ratio lindist extradist '
        b'extraratio'
    )
    if withsparseread:
        fm.plain(b' readsize largestblk rddensity srchunks')
    fm.plain(b'\n')

    # chains are numbered by order of first appearance of their base rev
    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        # bytes spanned on disk from the chain base to the end of this rev
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            # chain of length 1: this rev is its own base
            prevrev = -1

        # guard the ratios against zero-length revisions/chains
        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write(
            b'rev chainid chainlen prevrev deltatype compsize '
            b'uncompsize chainsize chainratio lindist extradist '
            b'extraratio',
            b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
            rev,
            chainid,
            len(chain),
            prevrev,
            deltatype,
            comp,
            uncomp,
            chainsize,
            chainratio,
            lineardist,
            extradist,
            extraratio,
            rev=rev,
            chainid=chainid,
            chainlen=len(chain),
            prevrev=prevrev,
            deltatype=deltatype,
            compsize=comp,
            uncompsize=uncomp,
            chainsize=chainsize,
            chainratio=chainratio,
            lindist=lineardist,
            extradist=extradist,
            extraratio=extraratio,
        )
        if withsparseread:
            # simulate how the sparse-read code would slice this chain into
            # disk read operations, and measure their cost
            readsize = 0
            largestblock = 0
            srchunks = 0

            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write(
                b'readsize largestblock readdensity srchunks',
                b' %10d %10d %9.5f %8d',
                readsize,
                largestblock,
                readdensity,
                srchunks,
                readsize=readsize,
                largestblock=largestblock,
                readdensity=readdensity,
                srchunks=srchunks,
            )

        fm.plain(b'\n')

    fm.end()
926 926
927 927
@command(
    b'debugdirstate|debugstate',
    [
        (
            b'',
            b'nodates',
            None,
            _(b'do not display the saved mtime (DEPRECATED)'),
        ),
        (b'', b'dates', True, _(b'display the saved mtime')),
        (b'', b'datesort', None, _(b'sort by saved mtime')),
    ],
    _(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    # --nodates is deprecated but still honored; it forces dates off
    nodates = not opts['dates']
    if opts.get('nodates') is not None:
        nodates = True
    datesort = opts.get('datesort')

    if datesort:
        keyfunc = lambda x: (x[1][3], x[0])  # sort by mtime, then by filename
    else:
        keyfunc = None  # sort by filename
    # dirstate values are tuples; fields used below: ent[0] state char,
    # ent[1] mode bits, ent[2] size, ent[3] mtime
    for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
        if ent[3] == -1:
            # mtime of -1 means the saved time is unset
            timestr = b'unset '
        elif nodates:
            timestr = b'set '
        else:
            timestr = time.strftime(
                "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
            )
            timestr = encoding.strtolocal(timestr)
        if ent[1] & 0o20000:
            # this mode bit distinguishes symlinks from regular files
            mode = b'lnk'
        else:
            mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
971 971
972 972
973 973 @command(
974 974 b'debugdiscovery',
975 975 [
976 976 (b'', b'old', None, _(b'use old-style discovery')),
977 977 (
978 978 b'',
979 979 b'nonheads',
980 980 None,
981 981 _(b'use old-style discovery with non-heads included'),
982 982 ),
983 983 (b'', b'rev', [], b'restrict discovery to this set of revs'),
984 984 (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
985 985 (
986 986 b'',
987 987 b'local-as-revs',
988 988 "",
            'treat local as having these revisions only',
990 990 ),
991 991 (
992 992 b'',
993 993 b'remote-as-revs',
994 994 "",
            'use local as remote, with only these revisions',
996 996 ),
997 997 ]
998 998 + cmdutil.remoteopts,
999 999 _(b'[--rev REV] [OTHER]'),
1000 1000 )
1001 1001 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
1002 1002 """runs the changeset discovery protocol in isolation
1003 1003
1004 1004 The local peer can be "replaced" by a subset of the local repository by
    using the `--local-as-revs` flag. In the same way, the usual `remote` peer
    can be "replaced" by a subset of the local repository using the
    `--remote-as-revs` flag. This is useful to efficiently debug pathological
    discovery situations.
1009 1009 """
1010 1010 opts = pycompat.byteskwargs(opts)
1011 1011 unfi = repo.unfiltered()
1012 1012
1013 1013 # setup potential extra filtering
1014 1014 local_revs = opts[b"local_as_revs"]
1015 1015 remote_revs = opts[b"remote_as_revs"]
1016 1016
1017 1017 # make sure tests are repeatable
1018 1018 random.seed(int(opts[b'seed']))
1019 1019
1020 1020 if not remote_revs:
1021 1021
1022 1022 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
1023 1023 remote = hg.peer(repo, opts, remoteurl)
1024 1024 ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))
1025 1025 else:
1026 1026 branches = (None, [])
1027 1027 remote_filtered_revs = scmutil.revrange(
1028 1028 unfi, [b"not (::(%s))" % remote_revs]
1029 1029 )
1030 1030 remote_filtered_revs = frozenset(remote_filtered_revs)
1031 1031
1032 1032 def remote_func(x):
1033 1033 return remote_filtered_revs
1034 1034
1035 1035 repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func
1036 1036
1037 1037 remote = repo.peer()
1038 1038 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1039 1039
1040 1040 if local_revs:
1041 1041 local_filtered_revs = scmutil.revrange(
1042 1042 unfi, [b"not (::(%s))" % local_revs]
1043 1043 )
1044 1044 local_filtered_revs = frozenset(local_filtered_revs)
1045 1045
1046 1046 def local_func(x):
1047 1047 return local_filtered_revs
1048 1048
1049 1049 repoview.filtertable[b'debug-discovery-local-filter'] = local_func
1050 1050 repo = repo.filtered(b'debug-discovery-local-filter')
1051 1051
1052 1052 data = {}
1053 1053 if opts.get(b'old'):
1054 1054
1055 1055 def doit(pushedrevs, remoteheads, remote=remote):
1056 1056 if not util.safehasattr(remote, b'branches'):
1057 1057 # enable in-client legacy support
1058 1058 remote = localrepo.locallegacypeer(remote.local())
1059 1059 common, _in, hds = treediscovery.findcommonincoming(
1060 1060 repo, remote, force=True, audit=data
1061 1061 )
1062 1062 common = set(common)
1063 1063 if not opts.get(b'nonheads'):
1064 1064 ui.writenoi18n(
1065 1065 b"unpruned common: %s\n"
1066 1066 % b" ".join(sorted(short(n) for n in common))
1067 1067 )
1068 1068
1069 1069 clnode = repo.changelog.node
1070 1070 common = repo.revs(b'heads(::%ln)', common)
1071 1071 common = {clnode(r) for r in common}
1072 1072 return common, hds
1073 1073
1074 1074 else:
1075 1075
1076 1076 def doit(pushedrevs, remoteheads, remote=remote):
1077 1077 nodes = None
1078 1078 if pushedrevs:
1079 1079 revs = scmutil.revrange(repo, pushedrevs)
1080 1080 nodes = [repo[r].node() for r in revs]
1081 1081 common, any, hds = setdiscovery.findcommonheads(
1082 1082 ui, repo, remote, ancestorsof=nodes, audit=data
1083 1083 )
1084 1084 return common, hds
1085 1085
1086 1086 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
1087 1087 localrevs = opts[b'rev']
1088 1088 with util.timedcm('debug-discovery') as t:
1089 1089 common, hds = doit(localrevs, remoterevs)
1090 1090
1091 1091 # compute all statistics
1092 1092 heads_common = set(common)
1093 1093 heads_remote = set(hds)
1094 1094 heads_local = set(repo.heads())
1095 1095 # note: they cannot be a local or remote head that is in common and not
1096 1096 # itself a head of common.
1097 1097 heads_common_local = heads_common & heads_local
1098 1098 heads_common_remote = heads_common & heads_remote
1099 1099 heads_common_both = heads_common & heads_remote & heads_local
1100 1100
1101 1101 all = repo.revs(b'all()')
1102 1102 common = repo.revs(b'::%ln', common)
1103 1103 roots_common = repo.revs(b'roots(::%ld)', common)
1104 1104 missing = repo.revs(b'not ::%ld', common)
1105 1105 heads_missing = repo.revs(b'heads(%ld)', missing)
1106 1106 roots_missing = repo.revs(b'roots(%ld)', missing)
1107 1107 assert len(common) + len(missing) == len(all)
1108 1108
1109 1109 initial_undecided = repo.revs(
1110 1110 b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
1111 1111 )
1112 1112 heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
1113 1113 roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
1114 1114 common_initial_undecided = initial_undecided & common
1115 1115 missing_initial_undecided = initial_undecided & missing
1116 1116
1117 1117 data[b'elapsed'] = t.elapsed
1118 1118 data[b'nb-common-heads'] = len(heads_common)
1119 1119 data[b'nb-common-heads-local'] = len(heads_common_local)
1120 1120 data[b'nb-common-heads-remote'] = len(heads_common_remote)
1121 1121 data[b'nb-common-heads-both'] = len(heads_common_both)
1122 1122 data[b'nb-common-roots'] = len(roots_common)
1123 1123 data[b'nb-head-local'] = len(heads_local)
1124 1124 data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
1125 1125 data[b'nb-head-remote'] = len(heads_remote)
1126 1126 data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
1127 1127 heads_common_remote
1128 1128 )
1129 1129 data[b'nb-revs'] = len(all)
1130 1130 data[b'nb-revs-common'] = len(common)
1131 1131 data[b'nb-revs-missing'] = len(missing)
1132 1132 data[b'nb-missing-heads'] = len(heads_missing)
1133 1133 data[b'nb-missing-roots'] = len(roots_missing)
1134 1134 data[b'nb-ini_und'] = len(initial_undecided)
1135 1135 data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
1136 1136 data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
1137 1137 data[b'nb-ini_und-common'] = len(common_initial_undecided)
1138 1138 data[b'nb-ini_und-missing'] = len(missing_initial_undecided)
1139 1139
1140 1140 # display discovery summary
1141 1141 ui.writenoi18n(b"elapsed time: %(elapsed)f seconds\n" % data)
1142 1142 ui.writenoi18n(b"round-trips: %(total-roundtrips)9d\n" % data)
1143 1143 ui.writenoi18n(b"heads summary:\n")
1144 1144 ui.writenoi18n(b" total common heads: %(nb-common-heads)9d\n" % data)
1145 1145 ui.writenoi18n(
1146 1146 b" also local heads: %(nb-common-heads-local)9d\n" % data
1147 1147 )
1148 1148 ui.writenoi18n(
1149 1149 b" also remote heads: %(nb-common-heads-remote)9d\n" % data
1150 1150 )
1151 1151 ui.writenoi18n(b" both: %(nb-common-heads-both)9d\n" % data)
1152 1152 ui.writenoi18n(b" local heads: %(nb-head-local)9d\n" % data)
1153 1153 ui.writenoi18n(
1154 1154 b" common: %(nb-common-heads-local)9d\n" % data
1155 1155 )
1156 1156 ui.writenoi18n(
1157 1157 b" missing: %(nb-head-local-missing)9d\n" % data
1158 1158 )
1159 1159 ui.writenoi18n(b" remote heads: %(nb-head-remote)9d\n" % data)
1160 1160 ui.writenoi18n(
1161 1161 b" common: %(nb-common-heads-remote)9d\n" % data
1162 1162 )
1163 1163 ui.writenoi18n(
1164 1164 b" unknown: %(nb-head-remote-unknown)9d\n" % data
1165 1165 )
1166 1166 ui.writenoi18n(b"local changesets: %(nb-revs)9d\n" % data)
1167 1167 ui.writenoi18n(b" common: %(nb-revs-common)9d\n" % data)
1168 1168 ui.writenoi18n(b" heads: %(nb-common-heads)9d\n" % data)
1169 1169 ui.writenoi18n(b" roots: %(nb-common-roots)9d\n" % data)
1170 1170 ui.writenoi18n(b" missing: %(nb-revs-missing)9d\n" % data)
1171 1171 ui.writenoi18n(b" heads: %(nb-missing-heads)9d\n" % data)
1172 1172 ui.writenoi18n(b" roots: %(nb-missing-roots)9d\n" % data)
1173 1173 ui.writenoi18n(b" first undecided set: %(nb-ini_und)9d\n" % data)
1174 1174 ui.writenoi18n(b" heads: %(nb-ini_und-heads)9d\n" % data)
1175 1175 ui.writenoi18n(b" roots: %(nb-ini_und-roots)9d\n" % data)
1176 1176 ui.writenoi18n(b" common: %(nb-ini_und-common)9d\n" % data)
1177 1177 ui.writenoi18n(b" missing: %(nb-ini_und-missing)9d\n" % data)
1178 1178
1179 1179 if ui.verbose:
1180 1180 ui.writenoi18n(
1181 1181 b"common heads: %s\n"
1182 1182 % b" ".join(sorted(short(n) for n in heads_common))
1183 1183 )
1184 1184
1185 1185
# read/copy buffer size (4 KiB) used by `hg debugdownload` below
_chunksize = 4 << 10
1187 1187
1188 1188
@command(
    b'debugdownload',
    [
        (b'o', b'output', b'', _(b'path')),
    ],
    optionalrepo=True,
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config"""
    fh = urlmod.open(ui, url, output)

    # stream either to the given output file or straight to the ui
    dest = open(output, b"wb", _chunksize) if output else ui
    try:
        while True:
            chunk = fh.read(_chunksize)
            if not chunk:
                break
            dest.write(chunk)
    finally:
        if output:
            dest.close()
1211 1211
1212 1212
@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = None

        # find where the extension was loaded from; frozen (oxidized)
        # builds have no __file__, so fall back to the executable path
        if util.safehasattr(extmod, '__file__'):
            extsource = pycompat.fsencode(extmod.__file__)
        elif getattr(sys, 'oxidized', False):
            extsource = pycompat.sysexecutable
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', b'').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        # in quiet/verbose mode the name gets its own line; otherwise the
        # compatibility note is appended to the same line
        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', extname)
        else:
            fm.write(b'name', b'%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain(b'\n')
            elif not exttestedwith:
                fm.plain(_(b' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(b' (%s!)\n' % lasttestedversion)

        fm.condwrite(
            ui.verbose and extsource,
            b'source',
            _(b' location: %s\n'),
            extsource or b"",
        )

        if ui.verbose:
            fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(
            ui.verbose and exttestedwith,
            b'testedwith',
            _(b' tested with: %s\n'),
            fm.formatlist(exttestedwith, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and extbuglink,
            b'buglink',
            _(b' bug reporting: %s\n'),
            extbuglink or b"",
        )

    fm.end()
1274 1274
1275 1275
@command(
    b'debugfileset',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'apply the filespec on this revision'),
            _(b'REV'),
        ),
        (
            b'',
            b'all-files',
            False,
            _(b'test files from all revisions and working directory'),
        ),
        (
            b's',
            b'show-matcher',
            None,
            _(b'print internal representation of matcher'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
    ],
    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset

    fileset.symbols  # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)

    # successive transformation stages of a fileset expression; each stage
    # can be dumped with --show-stage NAME (or --show-stage all)
    stages = [
        (b'parsed', pycompat.identity),
        (b'analyzed', filesetlang.analyze),
        (b'optimized', filesetlang.optimize),
    ]
    stagenames = {n for n, f in stages}

    showalways = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    # run the expression through each stage, dumping requested ones
    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(filesetlang.prettyformat(tree), b"\n")

    # gather the candidate file names that the matcher will be tested on
    files = set()
    if opts[b'all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts[b'all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(
            repo.dirstate.walk(
                scmutil.matchall(repo),
                subrepos=list(wctx.substate),
                unknown=True,
                ignored=True,
            )
        )
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(repo.getcwd(), expr)
    if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write(b"%s\n" % f)
1371 1371
1372 1372
@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    # the widest variant name determines the width of the first column
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len(b'format-variant'), maxvariantlength)

    def makeformatname(name):
        # pad so the value columns line up after the variant name
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))

    fm = ui.formatter(b'debugformat', opts)
    if fm.isplain():

        def formatvalue(value):
            # strings are shown as-is; other values render as yes/no
            if util.safehasattr(value, b'startswith'):
                return value
            if value:
                return b'yes'
            else:
                return b'no'

    else:
        formatvalue = pycompat.identity

    fm.plain(b'format-variant')
    fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
    fm.plain(b' repo')
    if ui.verbose:
        fm.plain(b' config default')
    fm.plain(b'\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # pick labels highlighting mismatches between repo, config, default
        if repovalue != configvalue:
            namelabel = b'formatvariant.name.mismatchconfig'
            repolabel = b'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = b'formatvariant.name.mismatchdefault'
            repolabel = b'formatvariant.repo.mismatchdefault'
        else:
            namelabel = b'formatvariant.name.uptodate'
            repolabel = b'formatvariant.repo.uptodate'

        fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
        fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
        if fv.default != configvalue:
            configlabel = b'formatvariant.config.special'
        else:
            configlabel = b'formatvariant.config.default'
        fm.condwrite(
            ui.verbose,
            b'config',
            b' %6s',
            formatvalue(configvalue),
            label=configlabel,
        )
        fm.condwrite(
            ui.verbose,
            b'default',
            b' %7s',
            formatvalue(fv.default),
            label=b'formatvariant.default',
        )
        fm.plain(b'\n')
    fm.end()
1443 1443
1444 1444
@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""
    # report each probed capability of the filesystem hosting `path`
    ui.writenoi18n(b'path: %s\n' % path)
    ui.writenoi18n(
        b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
    )
    ui.writenoi18n(b'exec: %s\n' % (b'yes' if util.checkexec(path) else b'no'))
    ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
    ui.writenoi18n(
        b'symlink: %s\n' % (b'yes' if util.checklink(path) else b'no')
    )
    ui.writenoi18n(
        b'hardlink: %s\n' % (b'yes' if util.checknlink(path) else b'no')
    )
    # probing case sensitivity needs a scratch file; best-effort only
    casesensitive = b'(unknown)'
    try:
        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as tmpf:
            casesensitive = b'yes' if util.fscasesensitive(tmpf.name) else b'no'
    except OSError:
        pass
    ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1467 1467
1468 1468
@command(
    b'debuggetbundle',
    [
        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
        (
            b't',
            b'type',
            b'bzip2',
            _(b'bundle compression type to use'),
            _(b'TYPE'),
        ),
    ],
    _(b'REPO FILE [-H|-C ID]...'),
    norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'getbundle'):
        raise error.Abort(b"getbundle() not supported by target repository")

    # assemble the getbundle() keyword arguments from the options
    args = {}
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args['bundlecaps'] = None
    bundle = repo.getbundle(b'debug', **args)

    # map the user-facing compression name onto the on-disk bundle type
    btypes = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }
    requested = opts.get(b'type', b'bzip2').lower()
    bundletype = btypes.get(requested)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_(b'unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1515 1515
1516 1516
@command(b'debugignore', [], b'[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write(b"%s\n" % pycompat.byterepr(ignore))
        return

    m = scmutil.match(repo[None], pats=files)
    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for f in m.files():
        nf = util.normpath(f)
        ignored = None
        ignoredata = None
        if nf != b'.':
            if ignore(nf):
                # the path itself matches an ignore rule
                ignored = nf
                ignoredata = repo.dirstate._ignorefileandline(nf)
            else:
                # otherwise check whether a containing directory is ignored
                for p in pathutil.finddirs(nf):
                    if ignore(p):
                        ignored = p
                        ignoredata = repo.dirstate._ignorefileandline(p)
                        break
        if not ignored:
            ui.write(_(b"%s is not ignored\n") % uipathfn(f))
            continue
        if ignored == nf:
            ui.write(_(b"%s is ignored\n") % uipathfn(f))
        else:
            ui.write(
                _(
                    b"%s is ignored because of "
                    b"containing directory %s\n"
                )
                % (uipathfn(f), ignored)
            )
        ignorefile, lineno, line = ignoredata
        ui.write(
            _(b"(ignore rule in %s, line %d: '%s')\n")
            % (ignorefile, lineno, line)
        )
1565 1565
1566 1566
@command(
    b'debugindex',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindex', file_, opts)

    # full node ids with --debug, abbreviated ones otherwise
    shortfn = hex if ui.debugflag else short

    # measure one node id (if any revision exists) to size the id columns
    idlen = 12
    for probe in store:
        idlen = len(shortfn(store.node(probe)))
        break

    fm = ui.formatter(b'debugindex', opts)
    fm.plain(
        b' rev linkrev %s %s p2\n'
        % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
    )

    for rev in store:
        node = store.node(rev)
        p1, p2 = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
        fm.write(b'node', b'%s ', shortfn(node))
        fm.write(b'p1', b'%s ', shortfn(p1))
        fm.write(b'p2', b'%s', shortfn(p2))
        fm.plain(b'\n')

    fm.end()
1606 1606
1607 1607
@command(
    b'debugindexdot',
    cmdutil.debugrevlogopts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
    ui.writenoi18n(b"digraph G {\n")
    for rev in r:
        node = r.node(rev)
        p1, p2 = r.parents(node)
        # every revision gets a first-parent edge; merges get a second one
        ui.write(b"\t%d -> %d\n" % (r.rev(p1), rev))
        if p2 != nullid:
            ui.write(b"\t%d -> %d\n" % (r.rev(p2), rev))
    ui.write(b"}\n")
1626 1626
1627 1627
@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    repo.changelog.shortest(nullid, 1)
    idx = repo.changelog.index
    # only the native (C/Rust) index implementations expose stats()
    if not util.safehasattr(idx, b'stats'):
        raise error.Abort(_(b'debugindexstats only works with native code'))
    stats = idx.stats()
    for key in sorted(stats):
        ui.write(b'%s: %d\n' % (key, stats[key]))
1637 1637
1638 1638
@command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
def debuginstall(ui, **opts):
    """test Mercurial installation

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)

    # count of detected issues; also the command's exit value
    problems = 0

    fm = ui.formatter(b'debuginstall', opts)
    fm.startitem()

    # encoding might be unknown or wrong. don't translate these messages.
    fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(
        err,
        b'encodingerror',
        b" %s\n (check that your locale is properly set)\n",
        err,
    )

    # Python
    pythonlib = None
    if util.safehasattr(os, '__file__'):
        pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
    elif getattr(sys, 'oxidized', False):
        # frozen (PyOxidizer) builds have no on-disk stdlib directory
        pythonlib = pycompat.sysexecutable

    fm.write(
        b'pythonexe',
        _(b"checking Python executable (%s)\n"),
        pycompat.sysexecutable or _(b"unknown"),
    )
    fm.write(
        b'pythonimplementation',
        _(b"checking Python implementation (%s)\n"),
        pycompat.sysbytes(platform.python_implementation()),
    )
    fm.write(
        b'pythonver',
        _(b"checking Python version (%s)\n"),
        (b"%d.%d.%d" % sys.version_info[:3]),
    )
    fm.write(
        b'pythonlib',
        _(b"checking Python lib (%s)...\n"),
        pythonlib or _(b"unknown"),
    )

    try:
        from . import rustext

        rustext.__doc__  # trigger lazy import
    except ImportError:
        rustext = None

    # TLS/SNI capabilities of the Python build
    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add(b'sni')

    fm.write(
        b'pythonsecurity',
        _(b"checking Python security support (%s)\n"),
        fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
    )

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if b'tls1.2' not in security:
        fm.plain(
            _(
                b' TLS 1.2 not supported by Python install; '
                b'network connections lack modern security\n'
            )
        )
    if b'sni' not in security:
        fm.plain(
            _(
                b' SNI not supported by Python install; may have '
                b'connectivity issues with some servers\n'
            )
        )

    fm.plain(
        _(
            b"checking Rust extensions (%s)\n"
            % (b'missing' if rustext is None else b'installed')
        ),
    )

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write(
        b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
    )
    fm.write(
        b'hgverextra',
        _(b"checking Mercurial custom build (%s)\n"),
        b'+'.join(hgver.split(b'+')[1:]),
    )

    # compiled modules
    hgmodules = None
    if util.safehasattr(sys.modules[__name__], '__file__'):
        hgmodules = os.path.dirname(pycompat.fsencode(__file__))
    elif getattr(sys, 'oxidized', False):
        hgmodules = pycompat.sysexecutable

    fm.write(
        b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
    )
    fm.write(
        b'hgmodules',
        _(b"checking installed modules (%s)...\n"),
        hgmodules or _(b"unknown"),
    )

    # verify the compiled (C and/or Rust) extension modules actually import
    rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
    rustext = rustandc  # for now, that's the only case
    cext = policy.policy in (b'c', b'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        err = None
        try:
            if cext:
                from .cext import (  # pytype: disable=import-error
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )

                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (  # pytype: disable=import-error
                    ancestor,
                    dirstate,
                )

                dir(ancestor), dir(dirstate)  # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, b'extensionserror', b" %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write(
        b'compengines',
        _(b'checking registered compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    fm.write(
        b'compenginesavail',
        _(b'checking available compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines if e.available()),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE
    )
    fm.write(
        b'compenginesserver',
        _(
            b'checking available compression engines '
            b'for wire protocol (%s)\n'
        ),
        fm.formatlist(
            [e.name() for e in wirecompengines if e.wireprotosupport()],
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    re2 = b'missing'
    if util._re2:
        re2 = b'available'
    fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatedir()
    fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
    fm.condwrite(not p, b'', _(b" no template directories found\n"))
    if p:
        (m, fp) = templater.try_open_template(b"map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
        else:
            p = None
        fm.condwrite(
            p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
        )
        fm.condwrite(
            not m,
            b'defaulttemplatenotfound',
            _(b" template '%s' not found\n"),
            b"default",
        )
    if not p:
        problems += 1
    fm.condwrite(
        not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
    )

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    fm.condwrite(
        not cmdpath and editor == b'vi',
        b'vinotfound',
        _(
            b" No commit editor set and can't find %s in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editor == b'vi' and editorbin,
    )
    fm.condwrite(
        not cmdpath and editor != b'vi',
        b'editornotfound',
        _(
            b" Can't find editor '%s' in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editorbin,
    )
    if not cmdpath and editor != b'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e.message
        problems += 1

    fm.condwrite(
        username, b'username', _(b"checking username (%s)\n"), username
    )
    fm.condwrite(
        err,
        b'usernameerror',
        _(
            b"checking username...\n %s\n"
            b" (specify a username in your configuration file)\n"
        ),
        err,
    )

    # let loaded extensions contribute their own install checks
    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, b'', _(b"no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(
        problems,
        b'problems',
        _(b"%d problems detected, please check your install!\n"),
        problems,
    )
    fm.end()

    return problems
1937 1937
1938 1938
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    # open the target as a peer so this works for remote repos too
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    nodes = [bin(s) for s in ids]
    # one output character per queried node: b"1" known, b"0" unknown
    bits = [b"1" if known else b"0" for known in peer.known(nodes)]
    ui.write(b"%s\n" % b"".join(bits))
1952 1952
1953 1953
@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # kept only as an alias: forward everything to debugnamecomplete
    debugnamecomplete(ui, repo, *args)
1958 1958
1959 1959
@command(
    b'debuglocks',
    [
        (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-free-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # force-free: simply unlink the lock file; no attempt is made to
    # notify the process (if any) holding it -- hence "DANGEROUS"
    if opts.get('force_free_lock'):
        repo.svfs.unlink(b'lock')
    if opts.get('force_free_wlock'):
        repo.vfs.unlink(b'wlock')
    if opts.get('force_free_lock') or opts.get('force_free_wlock'):
        return 0

    locks = []
    try:
        # non-blocking acquisition (False) so we fail immediately with a
        # clear message if another process already holds the lock
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            # block until the user answers; locks are released in `finally`
            ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    # reporting mode (no options given): inspect both lock files
    now = time.time()
    held = 0

    def report(vfs, name, method):
        # Try to take the lock non-blocking and report on the holder if
        # that fails; returns 1 if the lock is held, 0 if free.
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if b":" in locker:
                    host, pid = locker.split(b':')
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except OSError as e:
                # ENOENT means the lock file vanished: treat as free
                if e.errno != errno.ENOENT:
                    raise
        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    return held
2071 2071
2072 2072
@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        # The fulltext cache is an implementation detail of the revlog
        # manifest storage; abort with a clear message if this repo's
        # storage does not provide one.
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _(
                b"Current revlog implementation doesn't appear to have a "
                b"manifest fulltext cache\n"
            )
            raise error.Abort(msg)

    if opts.get('clear'):
        # mutation requires the wlock
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
            return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(e, hint=b"Check your manifest node id")
                manifest.read()  # stores revision in cache too
            return

    # no option given: display the cache contents
    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # Use cache.get to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )
2144 2144
2145 2145
@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
def debugmergestate(ui, repo, *args, **opts):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    if ui.verbose:
        # read the raw v1/v2 records only to report which format is in use
        ms = mergestatemod.mergestate(repo)

        # sort so that reasonable information is on top
        v1records = ms._readrecordsv1()
        v2records = ms._readrecordsv2()

        if not v1records and not v2records:
            pass
        elif not v2records:
            ui.writenoi18n(b'no version 2 merge state\n')
        elif ms._v1v2match(v1records, v2records):
            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
        else:
            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')

    opts = pycompat.byteskwargs(opts)
    if not opts[b'template']:
        # default human-readable rendering of the structured output below
        opts[b'template'] = (
            b'{if(commits, "", "no merge state found\n")}'
            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
            b'{files % "file: {path} (state \\"{state}\\")\n'
            b'{if(local_path, "'
            b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
            b' ancestor path: {ancestor_path} (node {ancestor_node})\n'
            b' other path: {other_path} (node {other_node})\n'
            b'")}'
            b'{if(rename_side, "'
            b' rename side: {rename_side}\n'
            b' renamed path: {renamed_path}\n'
            b'")}'
            b'{extras % " extra: {key} = {value}\n"}'
            b'"}'
            b'{extras % "extra: {file} ({key} = {value})\n"}'
        )

    ms = mergestatemod.mergestate.read(repo)

    fm = ui.formatter(b'debugmergestate', opts)
    fm.startitem()

    # the two merged commits (local/other) with their optional labels
    fm_commits = fm.nested(b'commits')
    if ms.active():
        for name, node, label_index in (
            (b'local', ms.local, 0),
            (b'other', ms.other, 1),
        ):
            fm_commits.startitem()
            fm_commits.data(name=name)
            fm_commits.data(node=hex(node))
            if ms._labels and len(ms._labels) > label_index:
                fm_commits.data(label=ms._labels[label_index])
    fm_commits.end()

    # per-file merge records; the fields emitted depend on the record type
    fm_files = fm.nested(b'files')
    if ms.active():
        for f in ms:
            fm_files.startitem()
            fm_files.data(path=f)
            state = ms._state[f]
            fm_files.data(state=state[0])
            if state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED,
                mergestatemod.MERGE_RECORD_RESOLVED,
            ):
                fm_files.data(local_key=state[1])
                fm_files.data(local_path=state[2])
                fm_files.data(ancestor_path=state[3])
                fm_files.data(ancestor_node=state[4])
                fm_files.data(other_path=state[5])
                fm_files.data(other_node=state[6])
                fm_files.data(local_flags=state[7])
            elif state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
                mergestatemod.MERGE_RECORD_RESOLVED_PATH,
            ):
                fm_files.data(renamed_path=state[1])
                fm_files.data(rename_side=state[2])
            fm_extras = fm_files.nested(b'extras')
            for k, v in sorted(ms.extras(f).items()):
                fm_extras.startitem()
                fm_extras.data(key=k)
                fm_extras.data(value=v)
            fm_extras.end()

    fm_files.end()

    # extras attached to files that have no merge record of their own
    fm_extras = fm.nested(b'extras')
    for f, d in sorted(pycompat.iteritems(ms.allextras())):
        if f in ms:
            # If file is in mergestate, we have already processed its extras
            continue
        for k, v in pycompat.iteritems(d):
            fm_extras.startitem()
            fm_extras.data(file=f)
            fm_extras.data(key=k)
            fm_extras.data(value=v)
    fm_extras.end()

    fm.end()
2253 2253
2254 2254
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    candidates = set()
    # branch names are handled separately below so that only open
    # branches are offered (we previously only listed open branches)
    for nsname, ns in pycompat.iteritems(repo.names):
        if nsname == b'branches':
            continue
        candidates.update(ns.listnames(repo))
    for branch, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            candidates.add(branch)
    # no argument means "complete everything" (empty prefix)
    prefixes = args or [b'']
    matches = {
        candidate
        for prefix in prefixes
        for candidate in candidates
        if candidate.startswith(prefix)
    }
    ui.write(b'\n'.join(sorted(matches)))
    ui.write(b'\n')
2277 2277
2278 2278
@command(
    b'debugnodemap',
    [
        (
            b'',
            b'dump-new',
            False,
            _(b'write a (new) persistent binary nodemap on stdout'),
        ),
        (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
        (
            b'',
            b'check',
            False,
            _(b'check that the data on disk data are correct.'),
        ),
        (
            b'',
            b'metadata',
            False,
            _(b'display the on disk meta data for the nodemap'),
        ),
    ],
)
def debugnodemap(ui, repo, **opts):
    """write and inspect on disk nodemap"""
    if opts['dump_new']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        # prefer the index's own serializer when the implementation has one
        if util.safehasattr(cl.index, "nodemap_data_all"):
            data = cl.index.nodemap_data_all()
        else:
            data = nodemap.persistent_data(cl.index)
        ui.write(data)
    elif opts['dump_disk']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write(data[:])
    elif opts['check']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            return nodemap.check_data(ui, cl.index, data)
    elif opts['metadata']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write((b"uid: %s\n") % docket.uid)
            ui.write((b"tip-rev: %d\n") % docket.tip_rev)
            ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
            ui.write((b"data-length: %d\n") % docket.data_length)
            ui.write((b"data-unused: %d\n") % docket.data_unused)
            # guard against ZeroDivisionError when the nodemap data is empty
            if docket.data_length:
                unused_perc = docket.data_unused * 100.0 / docket.data_length
            else:
                unused_perc = 0.0
            ui.write((b"data-unused: %2.3f%%\n") % unused_perc)
2340 2340
2341 2341
@command(
    b'debugobsolete',
    [
        (b'', b'flags', 0, _(b'markers flag')),
        (
            b'',
            b'record-parents',
            False,
            _(b'record parent information for the precursor'),
        ),
        (b'r', b'rev', [], _(b'display markers relevant to REV')),
        (
            b'',
            b'exclusive',
            False,
            _(b'restrict display to markers only relevant to REV'),
        ),
        (b'', b'index', False, _(b'display index of the marker')),
        (b'', b'delete', [], _(b'delete markers specified by indices')),
    ]
    + cmdutil.commitopts2
    + cmdutil.formatteropts,
    _(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        # parse a full-length hex node id; raises InputError otherwise
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.InputError(
                b'changeset references must be full hexadecimal '
                b'node identifiers'
            )

    # deletion mode: remove the markers at the given indices and stop
    if opts.get(b'delete'):
        indices = []
        for v in opts.get(b'delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.InputError(
                    _(b'invalid index value: %r') % v,
                    hint=_(b'use integers for indices'),
                )

        if repo.currenttransaction():
            raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of transaction.')
            )

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_(b'deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        # creation mode: record one marker precursor -> successors
        if opts[b'rev']:
            raise error.InputError(
                b'cannot select revision when creating marker'
            )
        metadata = {}
        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction(b'debugobsolete')
            try:
                date = opts.get(b'date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts[b'record_parents']:
                    # parents can only be recorded for changesets we have
                    if prec not in repo.unfiltered():
                        raise error.Abort(
                            b'cannot used --record-parents on '
                            b'unknown changesets'
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts[b'flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % pycompat.bytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # display mode: list markers, optionally restricted to --rev
        if opts[b'rev']:
            revs = scmutil.revrange(repo, opts[b'rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts[b'exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get(b'rev') and opts.get(b'index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get(b'index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
2491 2491
2492 2492
@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    # one "source -> destination" line per recorded copy against p1
    copymap = ctx.p1copies()
    for destination, source in copymap.items():
        ui.write(b'%s -> %s\n' % (source, destination))
2505 2505
2506 2506
@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""
    # This function was previously (mis)named debugp1copies, which shadowed
    # the real debugp1copies defined just above at module level. The command
    # itself was unaffected (registration uses the b'debugp2copies' name),
    # but the module attribute for p1 pointed at the p2 implementation.

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    # one "source -> destination" line per recorded copy against p2
    for dst, src in ctx.p2copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))
2519 2519
2520 2520
@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    """complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used."""

    def complete(path, acceptable):
        # Return (files, dirs) from the dirstate matching the given path
        # prefix, restricted to dirstate states listed in `acceptable`.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            # outside the repository: nothing to complete
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        spec = spec[len(rootdir) :]
        # dirstate paths always use b'/'; translate on platforms where the
        # OS separator differs (e.g. Windows)
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in pycompat.iteritems(dirstate):
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                # without --full, stop at the next path separator so only
                # one extra segment is offered
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # build the set of acceptable dirstate states from the options;
    # default (empty) falls back to b'nmar' (all states) below
    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')
2589 2589
2590 2590
@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    fromctx = scmutil.revsingle(repo, rev1)
    toctx = scmutil.revsingle(repo, rev2)
    matcher = scmutil.match(fromctx, pats, opts)
    # sorted by destination path for stable output
    copymap = copies.pathcopies(fromctx, toctx, matcher)
    for destination, source in sorted(copymap.items()):
        ui.write(b'%s -> %s\n' % (source, destination))
2604 2604
2605 2605
@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Peer request logging is enabled unconditionally, but --debug is
    # still required for it to actually be displayed.
    overrides = {(b'devel', b'debug.peer-request'): True}

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        def yesno(flag):
            return _(b'yes') if flag else _(b'no')

        ui.write(_(b'url: %s\n') % peer.url())
        ui.write(_(b'local: %s\n') % yesno(peer.local() is not None))
        ui.write(_(b'pushable: %s\n') % yesno(peer.canpush()))
2624 2624
2625 2625
@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts[b'tool']:
        overrides[(b'ui', b'forcemerge')] = opts[b'tool']
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        hgmerge = encoding.environ.get(b"HGMERGE")
        if hgmerge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config(b"ui", b"merge")
        if uimerge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts[b'changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                # without --debug, capture (and discard) output produced by
                # _picktool so only the FILE = MERGETOOL lines are shown
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(b'%s = %s\n' % (path, tool))
2713 2713
2714 2714
@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    """access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    """

    peer = hg.peer(ui, {}, repopath)
    if not keyinfo:
        # listing mode: dump every key/value pair in the namespace
        listing = peer.listkeys(namespace)
        for key, value in sorted(pycompat.iteritems(listing)):
            ui.write(
                b"%s\t%s\n"
                % (stringutil.escapestr(key), stringutil.escapestr(value))
            )
        return
    # update mode: conditionally set key from old to new
    key, old, new = keyinfo
    args = {
        b'namespace': namespace,
        b'key': key,
        b'old': old,
        b'new': new,
    }
    with peer.commandexecutor() as executor:
        outcome = executor.callcommand(b'pushkey', args).result()

    ui.status(pycompat.bytestr(outcome) + b'\n')
    return not outcome
2746 2746
2747 2747
@command(b'debugpvec', [], _(b'A B'))
def debugpvec(ui, repo, a, b=None):
    """display pvec data and the relation between two revisions"""
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = b"="
    elif pa > pb:
        rel = b">"
    elif pa < pb:
        rel = b"<"
    elif pa | pb:
        rel = b"|"
    else:
        # Previously `rel` was left unbound when none of the comparisons
        # above held, raising UnboundLocalError in the write() below.
        rel = b"?"
    ui.write(_(b"a: %s\n") % pa)
    ui.write(_(b"b: %s\n") % pb)
    ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(
        _(b"delta: %d hdist: %d distance: %d relation: %s\n")
        % (
            abs(pa._depth - pb._depth),
            pvec._hamming(pa._vec, pb._vec),
            pa.distance(pb),
            rel,
        )
    )
2774 2774
2775 2775
@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            # keep pending adds (state b'a') untouched
            dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
            changedfiles = manifestonly | dsnotadded

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2823 2823
2824 2824
@command(b'debugrebuildfncache', [], b'')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # all the work is done by the repair module
    repair.rebuildfncache(ui, repo)
2829 2829
2830 2830
@command(
    b'debugrename',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    matcher = scmutil.match(ctx, pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        # renamed() returns (source path, source filenode) or False/None.
        renamed = fctx.filelog().renamed(fctx.filenode())
        display = repo.pathto(path)
        if not renamed:
            ui.write(_(b"%s not renamed\n") % display)
        else:
            src, srcnode = renamed
            ui.write(
                _(b"%s renamed from %s:%s\n") % (display, src, hex(srcnode))
            )
2850 2850
2851 2851
@command(b'debugrequires|debugrequirements', [], b'')
def debugrequirements(ui, repo):
    """ print the current repo requirements """
    # One requirement per line, in stable (sorted) order.
    for requirement in sorted(repo.requirements):
        ui.write(b"%s\n" % requirement)
2857 2857
2858 2858
@command(
    b'debugrevlog',
    cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog

    By default prints aggregated statistics (revision counts, snapshot and
    delta breakdowns, chain lengths, compression ratio).  With -d/--dump,
    prints one raw line of index data per revision instead and exits.
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)

    # --dump mode: emit a machine-readable table, one row per revision.
    if opts.get(b"dump"):
        numrevs = len(r)
        ui.write(
            (
                b"# rev p1rev p2rev start end deltastart base p1 p2"
                b" rawsize totalsize compression heads chainlen\n"
            )
        )
        # ts: running total of raw (uncompressed) sizes seen so far.
        ts = 0
        # heads: revisions not (yet) seen as a parent of a later revision.
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # Full revision: treat itself as its own delta base.
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # Parents of this rev are no longer heads; this rev is.
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                # Ratio of accumulated raw size to on-disk end offset
                # (r.end(rev) can be 0 for an empty revlog prefix).
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write(
                b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                b"%11d %5d %8d\n"
                % (
                    rev,
                    p1,
                    p2,
                    r.start(rev),
                    r.end(rev),
                    r.start(dbase),
                    r.start(cbase),
                    r.start(p1),
                    r.start(p2),
                    rs,
                    ts,
                    compression,
                    len(heads),
                    clen,
                )
            )
        return 0

    # Statistics mode: decode the revlog version/flags header word.
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append(b'inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append(b'generaldelta')
    if not flags:
        flags = [b'(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks ways the "delta" are build
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about delta chain of each revs
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision; each list is [min, max, total].
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        # Fold `size` into the [min, max, total] accumulator `l`.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # No delta parent: a full snapshot (depth 0) or empty text.
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            # Delta revision: extend the chain stats from the base.
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                # Plain delta: classify by what it was computed against.
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, b'_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            # First byte of the stored chunk identifies its compression.
            chunktype = bytes(segment[0:1])
        else:
            chunktype = b'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # Convert the running totals into averages in place (slot [2]).
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    if numfull == 0:
        fullsize[2] = 0
    else:
        fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    # Format-string templates; the %%%dd is filled with a field width
    # derived from the largest value to be printed.
    basedfmtstr = b'%%%dd\n'
    basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        # Plain decimal format sized to fit `max`.
        return basedfmtstr % len(str(max))

    def pcfmtstr(max, padding=0):
        # Decimal-plus-percentage format sized to fit `max`.
        return basepcfmtstr % (len(str(max)), b' ' * padding)

    def pcfmt(value, total):
        # (value, percentage-of-total) pair for pcfmtstr templates.
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.writenoi18n(b'format : %d\n' % format)
    ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))

    ui.write(b'\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
    ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
    ui.writenoi18n(
        b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
    )
    ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
    ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
    ui.writenoi18n(
        b' text : '
        + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b' delta : '
        + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b' lvl-%-3d : ' % depth)
            + fmt % pcfmt(numsnapdepth[depth], numrevs)
        )
    ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
    ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
    ui.writenoi18n(
        b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b' lvl-%-3d : ' % depth)
            + fmt % pcfmt(snaptotal[depth], totalsize)
        )
    ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        # Human-readable label for a chunk-type byte.
        if chunktype == b'empty':
            return b' %s : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return b' 0x%s : ' % hex(chunktype)

    ui.write(b'\n')
    ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write(b'\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
    ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
    ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
    ui.writenoi18n(b'compression ratio : ' + fmt % compratio)

    if format > 0:
        ui.write(b'\n')
        ui.writenoi18n(
            b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
            % tuple(datasize)
        )
        ui.writenoi18n(
            b'full revision size (min/max/avg) : %d / %d / %d\n'
            % tuple(fullsize)
        )
        ui.writenoi18n(
            b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
            % tuple(semisize)
        )
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.writenoi18n(
                b' level-%-3d (min/max/avg) : %d / %d / %d\n'
                % ((depth,) + tuple(snapsizedepth[depth]))
            )
        ui.writenoi18n(
            b'delta size (min/max/avg) : %d / %d / %d\n'
            % tuple(deltasize)
        )

    if numdeltas > 0:
        ui.write(b'\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.writenoi18n(
            b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
        )
        if numprev > 0:
            ui.writenoi18n(
                b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
            )
            ui.writenoi18n(
                b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
            )
            ui.writenoi18n(
                b' other : ' + fmt2 % pcfmt(numoprev, numprev)
            )
        if gdelta:
            ui.writenoi18n(
                b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
            )
3214 3214
@command(
    b'debugrevlogindex',
    cmdutil.debugrevlogopts
    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
    _(b'[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index

    Output layout is selected by -f/--format (0 or 1) and widened with
    --verbose; --debug prints full hashes instead of short ones.
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
    format = opts.get(b'format', 0)
    if format not in (0, 1):
        raise error.Abort(_(b"unknown format %d") % format)

    # Full-length node ids with --debug, abbreviated ones otherwise.
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        # Measure one formatted node to size the id columns; one sample
        # suffices since all ids format to the same length.
        idlen = len(shortfn(r.node(i)))
        break

    # Print the column header matching the chosen format/verbosity.
    if format == 0:
        if ui.verbose:
            ui.writenoi18n(
                b" rev offset length linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
        else:
            ui.writenoi18n(
                b" rev linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
    elif format == 1:
        if ui.verbose:
            ui.writenoi18n(
                (
                    b" rev flag offset length size link p1"
                    b" p2 %s\n"
                )
                % b"nodeid".rjust(idlen)
            )
        else:
            ui.writenoi18n(
                b" rev flag size link p1 p2 %s\n"
                % b"nodeid".rjust(idlen)
            )

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # Be resilient to a corrupt index entry: fall back to
                # null parents rather than aborting the dump.
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write(
                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
                    % (
                        i,
                        r.start(i),
                        r.length(i),
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
            else:
                ui.write(
                    b"% 6d % 7d %s %s %s\n"
                    % (
                        i,
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
        elif format == 1:
            # Format 1 reports parents as revision numbers, not nodes.
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write(
                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.start(i),
                        r.length(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
            else:
                ui.write(
                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
3328 3328
3329 3329
@command(
    b'debugrevspec',
    [
        (
            b'',
            b'optimize',
            None,
            _(b'print parsed tree after optimizing (DEPRECATED)'),
        ),
        (
            b'',
            b'show-revs',
            True,
            _(b'print list of result revisions (default)'),
        ),
        (
            b's',
            b'show-set',
            None,
            _(b'print internal representation of result set'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
        (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
        (b'', b'verify-optimized', False, _(b'verify optimized result')),
    ],
    b'REVSPEC',
)
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems(b'revsetalias')
    # Pipeline of (name, transform) pairs applied to the parsed tree in
    # order; each stage's output feeds the next.
    stages = [
        (b'parsed', lambda tree: tree),
        (
            b'expanded',
            lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
        ),
        (b'concatenated', revsetlang.foldconcat),
        (b'analyzed', revsetlang.analyze),
        (b'optimized', revsetlang.optimize),
    ]
    if opts[b'no_optimized']:
        # Drop the final (optimize) stage.
        stages = stages[:-1]
    if opts[b'verify_optimized'] and opts[b'no_optimized']:
        raise error.Abort(
            _(b'cannot use --verify-optimized with --no-optimized')
        )
    stagenames = {n for n, f in stages}

    # Which stages to print: always, or only when the tree changed.
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
        showchanged.update([b'expanded', b'concatenated'])
    if opts[b'optimize']:
        showalways.add(b'optimized')
    if opts[b'show_stage'] and opts[b'optimize']:
        raise error.Abort(_(b'cannot use --optimize with --show-stage'))
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    # Run the pipeline, keeping each stage's tree for later verification.
    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(revsetlang.prettyformat(tree), b"\n")
            printedtree = tree

    if opts[b'verify_optimized']:
        # Evaluate both the analyzed and optimized trees and diff the
        # resulting revision sequences.
        arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
        brevs = revset.makematcher(treebystage[b'optimized'])(repo)
        if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
            ui.writenoi18n(
                b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
            )
            ui.writenoi18n(
                b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
            )
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        # Mismatch: render a unified-diff-style listing of the revisions.
        ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
        ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write(b'-%d\n' % c, label=b'diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write(b'+%d\n' % c, label=b'diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(b' %d\n' % c)
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
        ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
    if not opts[b'show_revs']:
        return
    for c in revs:
        ui.write(b"%d\n" % c)
3461 3461
3462 3462
@command(
    b'debugserve',
    [
        (
            b'',
            b'sshstdio',
            False,
            _(b'run an SSH server bound to process handles'),
        ),
        (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
        (b'', b'logiofile', b'', _(b'file to log server I/O to')),
    ],
    b'',
)
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts[b'sshstdio']:
        raise error.Abort(_(b'only --sshstdio is currently supported'))

    # Optional file handle that all server I/O gets logged to.
    logfh = None

    if opts[b'logiofd'] and opts[b'logiofile']:
        raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))

    if opts[b'logiofd']:
        # Ideally we would be line buffered. But line buffering in binary
        # mode isn't supported and emits a warning in Python 3.8+. Disabling
        # buffering could have performance impacts. But since this isn't
        # performance critical code, it should be fine.
        try:
            logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
    elif opts[b'logiofile']:
        logfh = open(opts[b'logiofile'], b'ab', 0)

    # Serve over this process's stdin/stdout until the client goes away.
    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()
3511 3511
3512 3512
@command(b'debugsetparents', [], _(b'REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory (DANGEROUS)

    This command is not what you are looking for and should not be used. Using
    this command will most certainly result in slight corruption of the file
    level histories within your repository. DO NOT USE THIS COMMAND.

    The command updates the p1 and p2 fields in the dirstate, and does not
    touch anything else. This is useful for writing repository conversion
    tools, but should be used with extreme care. For example, neither the
    working directory nor the dirstate is updated, so file status may be
    incorrect after running this command. Only use it if you are one of the
    few people that deeply understand both conversion tools and file level
    histories. If you are reading this help, you are not one of those people
    (most of them sailed west from Mithlond anyway).

    So one last time DO NOT USE THIS COMMAND.

    Returns 0 on success.
    """

    # Resolve both revisions to nodes; rev2 defaults to the null revision.
    node1 = scmutil.revsingle(repo, rev1).node()
    node2 = scmutil.revsingle(repo, rev2, b'null').node()

    with repo.wlock():
        repo.setparents(node1, node2)
3540 3540
3541 3541
@command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugsidedata(ui, repo, file_, rev=None, **opts):
    """dump the side data for a cl/manifest/file revision

    Use --verbose to dump the sidedata content."""
    opts = pycompat.byteskwargs(opts)
    # With -c/-m/--dir the sole positional argument is the revision, not a
    # file: shuffle the arguments accordingly.
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            # Report the actual command name; this previously said
            # b'debugdata' (copy-paste from the debugdata command).
            raise error.CommandError(b'debugsidedata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugsidedata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugsidedata', file_, opts)
    # Unwrap filelog-style wrappers to reach the underlying revlog.
    r = getattr(r, '_revlog', r)
    try:
        sidedata = r.sidedata(r.lookup(rev))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
    if sidedata:
        # Print entries sorted by key; --verbose also dumps each value.
        sidedata = list(sidedata.items())
        sidedata.sort()
        ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
        for key, value in sidedata:
            ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
            if ui.verbose:
                ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3568 3568
3569 3569
@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    """test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    """
    if not pycompat.iswindows:
        raise error.Abort(
            _(b'certificate chain building is only possible on Windows')
        )

    if not source:
        if not repo:
            raise error.Abort(
                _(
                    b"there is no Mercurial repository here, and no "
                    b"server specified"
                )
            )
        source = b"default"

    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)

    # Only https and ssh URLs make sense here; fill in standard ports.
    defaultport = {b'https': 443, b'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_(b"malformed port number in URL"))
    else:
        raise error.Abort(_(b"only https and ssh connections are supported"))

    from . import win32

    # Verification is intentionally disabled (CERT_NONE): we only need the
    # peer's raw certificate bytes, not a validated connection.
    s = ssl.wrap_socket(
        socket.socket(),
        ssl_version=ssl.PROTOCOL_TLS,
        cert_reqs=ssl.CERT_NONE,
        ca_certs=None,
    )

    try:
        s.connect(addr)
        cert = s.getpeercert(True)

        ui.status(_(b'checking the certificate chain for %s\n') % url.host)

        # First probe without building; only attempt the (slow) chain
        # build / Windows Update path if the chain is incomplete.
        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_(b'certificate chain is incomplete, updating... '))

            if not win32.checkcertificatechain(cert):
                ui.status(_(b'failed.\n'))
            else:
                ui.status(_(b'done.\n'))
        else:
            ui.status(_(b'full certificate chain is available\n'))
    finally:
        s.close()
3639 3639
3640 3640
@command(
    b"debugbackupbundle",
    [
        (
            b"",
            b"recover",
            b"",
            b"brings the specified changeset back into the repository",
        )
    ]
    + cmdutil.logopts,
    _(b"hg debugbackupbundle [--recover HASH]"),
)
def debugbackupbundle(ui, repo, *pats, **opts):
    """lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in each
    backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for that
    backup.
    """
    # Collect all strip backups (.hg/strip-backup/*.hg), newest first.
    backups = list(
        filter(
            os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
        )
    )
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    opts = pycompat.byteskwargs(opts)
    # Neutralize bundle/force so getremotechanges below behaves like a
    # plain incoming check.
    opts[b"bundle"] = b""
    opts[b"force"] = None
    limit = logcmdutil.getlimit(opts)

    def display(other, chlist, displayer):
        # Show up to `limit` changesets from chlist, honoring the log
        # options (--newest-first, --no-merges).
        if opts.get(b"newest_first"):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [True for p in other.changelog.parents(n) if p != nullid]
            if opts.get(b"no_merges") and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get(b"recover")
    if recovernode:
        # Nothing to recover if the changeset is already present.
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_(b"%s already exists in the repo\n") % recovernode)
            return
    elif backups:
        msg = _(
            b"Recover changesets using: hg debugbackupbundle --recover "
            b"<changeset hash>\n\nAvailable backup changesets:"
        )
        ui.status(msg, label=b"status.removed")
    else:
        ui.status(_(b"no backup changesets found\n"))
        return

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = ui.expandpath(os.path.relpath(backup, encoding.getcwd()))
        source, branches = hg.parseurl(source, opts.get(b"branch"))
        try:
            other = hg.peer(repo, opts, source)
        except error.LookupError as ex:
            # Bundle references a parent revision we don't have; skip it.
            msg = _(b"\nwarning: unable to open bundle %s") % source
            hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg, hint=hint)
            continue
        revs, checkout = hg.addbranchrevs(
            repo, other, branches, opts.get(b"rev")
        )

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        # Silence the incoming-comparison chatter while computing chlist.
        quiet = ui.quiet
        try:
            ui.quiet = True
            other, chlist, cleanupfn = bundlerepo.getremotechanges(
                ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
            )
        except error.LookupError:
            continue
        finally:
            ui.quiet = quiet

        try:
            if not chlist:
                continue
            if recovernode:
                # Apply the first bundle that contains the wanted node.
                with repo.lock(), repo.transaction(b"unbundle") as tr:
                    if scmutil.isrevsymbol(other, recovernode):
                        ui.status(_(b"Unbundling %s\n") % (recovernode))
                        f = hg.openpath(ui, source)
                        gen = exchange.readbundle(ui, f, source)
                        if isinstance(gen, bundle2.unbundle20):
                            bundle2.applybundle(
                                repo,
                                gen,
                                tr,
                                source=b"unbundle",
                                url=b"bundle:" + source,
                            )
                        else:
                            gen.apply(repo, b"unbundle", b"bundle:" + source)
                        break
            else:
                # Listing mode: header with the bundle's mtime, then the
                # changesets it contains.
                backupdate = encoding.strtolocal(
                    time.strftime(
                        "%a %H:%M, %Y-%m-%d",
                        time.localtime(os.path.getmtime(source)),
                    )
                )
                ui.status(b"\n%s\n" % (backupdate.ljust(50)))
                if ui.verbose:
                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
                else:
                    opts[
                        b"template"
                    ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
                    displayer = logcmdutil.changesetdisplayer(
                        ui, other, opts, False
                    )
                    display(other, chlist, displayer)
                    displayer.close()
        finally:
            cleanupfn()
3776 3776
3777 3777
@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    """dump the subrepository state recorded by a revision

    For every subrepository path in the changeset's substate, print the
    path followed by its source URL and pinned revision.
    """
    ctx = scmutil.revsingle(repo, rev, None)
    for path in sorted(ctx.substate):
        state = ctx.substate[path]
        ui.writenoi18n(b'path %s\n' % path)
        ui.writenoi18n(b' source %s\n' % state[0])
        ui.writenoi18n(b' revision %s\n' % state[1])
3789 3789
3790 3790
@command(b'debugshell', optionalrepo=True)
def debugshell(ui, repo):
    """run an interactive Python interpreter

    The local namespace is provided with a reference to the ui and
    the repo instance (if available).
    """
    # Imported lazily: only needed when this debug command actually runs.
    import code

    code.interact(local={'ui': ui, 'repo': repo})
3806 3806
3807 3807
@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # The cache is threaded through every successorssets() call so repeated
    # computations across revisions are shared.
    cache = {}
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % bytes(ctx))
        succsets = obsutil.successorssets(
            repo, ctx.node(), closest=opts['closest'], cache=cache
        )
        for succset in succsets:
            # One line per successors set; empty sets print a bare newline.
            shortened = [short(node) for node in succset]
            if shortened:
                ui.write(b' ')
                ui.write(b' '.join(shortened))
            ui.write(b'\n')
3862 3862
3863 3863
@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    # The .hgtags filelog lets us verify that a cached fnode actually
    # exists in the store rather than being stale garbage.
    flog = repo.file(b'.hgtags')
    for r in repo:
        node = repo[r].node()
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is None:
            shown = b'missing'
        elif not fnode:
            # Present in the cache file but not a usable node value.
            shown = b'invalid'
        else:
            shown = hex(fnode)
            if not flog.hasnode(fnode):
                shown += b' (unknown node)'
        ui.write(b'%d %s %s\n' % (r, hex(node), shown))
3879 3882
3880 3883
@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = scmutil.revrange(repo, opts['rev'])

    # Parse -D KEY=VALUE definitions into template properties.  A missing
    # '=', an empty key, or the reserved key 'ui' are all malformed.
    props = {}
    for definition in opts['define']:
        key, sep, value = definition.partition(b'=')
        key = key.strip()
        value = value.strip()
        if not sep or not key or key == b'ui':
            raise error.Abort(
                _(b'malformed keyword definition: %s') % definition
            )
        props[key] = value

    if ui.verbose:
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        expanded = templater.expandaliases(tree, aliases)
        if expanded != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(expanded), b'\n'
            )

    def showsymbols(t):
        # With --verbose, report the keywords and functions the template uses.
        kwds, funcs = t.symbolsuseddefault()
        ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
        ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))

    if revs is None:
        # Generic template: render once with the defined properties.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            showsymbols(t)
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested changeset.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            showsymbols(displayer.t)
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
3944 3947
3945 3948
@command(
    b'debuguigetpass',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    response = ui.getpass(prompt)
    # getpass() can return None (e.g. on EOF); show a placeholder then.
    if response is None:
        response = b"<default response>"
    ui.writenoi18n(b'response: %s\n' % response)
3960 3963
3961 3964
@command(
    b'debuguiprompt',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    response = ui.prompt(prompt)
    ui.writenoi18n(b'response: %s\n' % response)
3974 3977
3975 3978
@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Take both the working-copy lock and the store lock so cache files
    # can be rewritten safely.
    with repo.wlock():
        with repo.lock():
            repo.updatecaches(full=True)
3981 3984
3982 3985
@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
        (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlog will be upgraded. You can restrict this using flag
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlog but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    * `--filelogs`: optimize the filelogs only
    * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
    """
    # Deduplicate requested optimizations before delegating to upgrade.
    optimizations = set(optimize)
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=optimizations, backup=backup, **opts
    )
4032 4035
4033 4036
@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns

    Prints one line per matched file: the repo-relative path, the
    cwd-relative path, and whether the match was "exact".
    """
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    # Display paths with '/' separators when ui.slash is set on platforms
    # whose native separator differs.
    normalize = lambda fn: fn
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        normalize = util.normpath
    # Fix: the original shadowed the builtin ``abs`` with a loop variable
    # and computed repo.pathto() twice per file (once for the column-width
    # calculation and once for output). Compute the relative paths once.
    relpaths = [repo.pathto(fn) for fn in items]
    fmt = b'f %%-%ds %%-%ds %%s' % (
        max(len(fn) for fn in items),
        max(len(rel) for rel in relpaths),
    )
    for fn, rel in zip(items, relpaths):
        line = fmt % (
            fn,
            normalize(rel),
            b'exact' if m.exact(fn) else b'',
        )
        ui.write(b"%s\n" % line.rstrip())
4060 4063
4061 4064
@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        dnodes = b''
        if entry.get(b'divergentnodes'):
            # Format each divergent node as "hex (phase)".
            descriptions = [
                b'%s (%s)' % (ctx.hex(), ctx.phasestr())
                for ctx in entry[b'divergentnodes']
            ]
            dnodes = b' '.join(descriptions) + b' '
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )
4079 4082
4080 4083
@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    """echo arguments back through the wire protocol of a remote peer

    The remote ``debugwireargs`` command is invoked twice; the first result
    is printed, and the second is shown as a warning if it disagrees.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    # Drop the connection-related options; only command arguments are sent.
    for remoteopt in cmdutil.remoteopts:
        del opts[remoteopt[1]]
    args = pycompat.strkwargs(
        {k: v for k, v in pycompat.iteritems(opts) if v}
    )
    # run twice to check that we don't mess up the stream for the next command
    first = peer.debugwireargs(*vals, **args)
    second = peer.debugwireargs(*vals, **args)
    ui.write(b"%s\n" % first)
    if first != second:
        ui.warn(b"%s\n" % second)
4108 4111
4109 4112
def _parsewirelangblocks(fh):
    """Parse the debugwireproto mini language into (action, lines) pairs.

    Yields one tuple per block: the unindented action line and the list of
    indented payload lines belonging to it.  Blank lines and ``#`` comment
    lines are skipped.  A payload line indented deeper than its predecessor
    is a continuation: its stripped text is appended to that predecessor.
    """
    activeaction = None
    blocklines = []
    lastindent = 0

    for rawline in fh:
        line = rawline.rstrip()
        # Skip blanks and comments entirely.
        if not line or line.startswith(b'#'):
            continue

        if not line.startswith(b' '):
            # Unindented line: begins a new block; emit the finished one.
            if activeaction:
                yield activeaction, blocklines
            activeaction = line
            blocklines = []
            lastindent = 0
            continue

        # Indented payload line.
        if not activeaction:
            raise error.Abort(_(b'indented line outside of block'))

        indent = len(line) - len(line.lstrip())
        if indent > lastindent and blocklines:
            # Deeper indent than the previous line: continuation.
            blocklines[-1] += line.lstrip()
        else:
            blocklines.append(line)
            lastindent = indent

    # Emit the trailing block, if any.
    if activeaction:
        yield activeaction, blocklines
4150 4153
4151 4154
@command(
    b'debugwireproto',
    [
        (b'', b'localssh', False, _(b'start an SSH server for this repo')),
        (b'', b'peer', b'', _(b'construct a specific version of the peer')),
        (
            b'',
            b'noreadstderr',
            False,
            _(b'do not read from stderr of the remote'),
        ),
        (
            b'',
            b'nologhandshake',
            False,
            _(b'do not log I/O related to the peer handshake'),
        ),
    ]
    + cmdutil.remoteopts,
    _(b'[PATH]'),
    optionalrepo=True,
)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
    raw data payloads and don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is specified via stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.

    raw+
    ----

    Behaves like ``raw`` except flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

        command listkeys
            namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` N bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` N bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit a *Unified Frame-Based Protocol* by using special
    syntax.

    A frame is composed as a type, flags, and payload. These can be parsed
    from a string of the form:

       <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
    """
    opts = pycompat.byteskwargs(opts)

    if opts[b'localssh'] and not repo:
        raise error.Abort(_(b'--localssh requires a repository'))

    if opts[b'peer'] and opts[b'peer'] not in (
        b'raw',
        b'http2',
        b'ssh1',
        b'ssh2',
    ):
        raise error.Abort(
            _(b'invalid value for --peer'),
            hint=_(b'valid values are "raw", "ssh1", and "ssh2"'),
        )

    if path and opts[b'localssh']:
        raise error.Abort(_(b'cannot specify --localssh with an explicit path'))

    if ui.interactive():
        ui.write(_(b'(waiting for commands on stdin)\n'))

    blocks = list(_parsewirelangblocks(ui.fin))

    proc = None
    stdin = None
    stdout = None
    stderr = None
    opener = None

    if opts[b'localssh']:
        # We start the SSH server in its own process so there is process
        # separation. This prevents a whole class of potential bugs around
        # shared state from interfering with server operation.
        args = procutil.hgcmd() + [
            b'-R',
            repo.root,
            b'debugserve',
            b'--sshstdio',
        ]
        proc = subprocess.Popen(
            pycompat.rapply(procutil.tonativestr, args),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            bufsize=0,
        )

        stdin = proc.stdin
        stdout = proc.stdout
        stderr = proc.stderr

        # We turn the pipes into observers so we can log I/O.
        if ui.verbose or opts[b'peer'] == b'raw':
            stdin = util.makeloggingfileobject(
                ui, proc.stdin, b'i', logdata=True
            )
            stdout = util.makeloggingfileobject(
                ui, proc.stdout, b'o', logdata=True
            )
            stderr = util.makeloggingfileobject(
                ui, proc.stderr, b'e', logdata=True
            )

        # --localssh also implies the peer connection settings.

        url = b'ssh://localserver'
        autoreadstderr = not opts[b'noreadstderr']

        if opts[b'peer'] == b'ssh1':
            ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
            peer = sshpeer.sshv1peer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                None,
                autoreadstderr=autoreadstderr,
            )
        elif opts[b'peer'] == b'ssh2':
            ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
            peer = sshpeer.sshv2peer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                None,
                autoreadstderr=autoreadstderr,
            )
        elif opts[b'peer'] == b'raw':
            ui.write(_(b'using raw connection to peer\n'))
            peer = None
        else:
            ui.write(_(b'creating ssh peer from handshake results\n'))
            peer = sshpeer.makepeer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                autoreadstderr=autoreadstderr,
            )

    elif path:
        # We bypass hg.peer() so we can proxy the sockets.
        # TODO consider not doing this because we skip
        # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
        u = util.url(path)
        if u.scheme != b'http':
            raise error.Abort(_(b'only http:// paths are currently supported'))

        url, authinfo = u.authinfo()
        openerargs = {
            'useragent': b'Mercurial debugwireproto',
        }

        # Turn pipes/sockets into observers so we can log I/O.
        if ui.verbose:
            openerargs.update(
                {
                    'loggingfh': ui,
                    'loggingname': b's',
                    'loggingopts': {
                        'logdata': True,
                        'logdataapis': False,
                    },
                }
            )

        if ui.debugflag:
            openerargs['loggingopts']['logdataapis'] = True

        # Don't send default headers when in raw mode. This allows us to
        # bypass most of the behavior of our URL handling code so we can
        # have near complete control over what's sent on the wire.
        if opts[b'peer'] == b'raw':
            openerargs['sendaccept'] = False

        opener = urlmod.opener(ui, authinfo, **openerargs)

        if opts[b'peer'] == b'http2':
            ui.write(_(b'creating http peer for wire protocol version 2\n'))
            # We go through makepeer() because we need an API descriptor for
            # the peer instance to be useful.
            with ui.configoverride(
                {(b'experimental', b'httppeer.advertise-v2'): True}
            ):
                if opts[b'nologhandshake']:
                    ui.pushbuffer()

                peer = httppeer.makepeer(ui, path, opener=opener)

                if opts[b'nologhandshake']:
                    ui.popbuffer()

            if not isinstance(peer, httppeer.httpv2peer):
                raise error.Abort(
                    _(
                        b'could not instantiate HTTP peer for '
                        b'wire protocol version 2'
                    ),
                    hint=_(
                        b'the server may not have the feature '
                        b'enabled or is not allowing this '
                        b'client version'
                    ),
                )

        elif opts[b'peer'] == b'raw':
            ui.write(_(b'using raw connection to peer\n'))
            peer = None
        elif opts[b'peer']:
            raise error.Abort(
                _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
            )
        else:
            peer = httppeer.makepeer(ui, path, opener=opener)

        # We /could/ populate stdin/stdout with sock.makefile()...
    else:
        raise error.Abort(_(b'unsupported connection configuration'))

    batchedcommands = None

    # Now perform actions based on the parsed wire language instructions.
    for action, lines in blocks:
        if action in (b'raw', b'raw+'):
            if not stdin:
                raise error.Abort(_(b'cannot call raw/raw+ on this peer'))

            # Concatenate the data together.
            data = b''.join(l.lstrip() for l in lines)
            data = stringutil.unescapestr(data)
            stdin.write(data)

            if action == b'raw+':
                stdin.flush()
        elif action == b'flush':
            if not stdin:
                raise error.Abort(_(b'cannot call flush on this peer'))
            stdin.flush()
        elif action.startswith(b'command'):
            if not peer:
                raise error.Abort(
                    _(
                        b'cannot send commands unless peer instance '
                        b'is available'
                    )
                )

            command = action.split(b' ', 1)[1]

            args = {}
            for line in lines:
                # We need to allow empty values.
                fields = line.lstrip().split(b' ', 1)
                if len(fields) == 1:
                    key = fields[0]
                    value = b''
                else:
                    key, value = fields

                if value.startswith(b'eval:'):
                    value = stringutil.evalpythonliteral(value[5:])
                else:
                    value = stringutil.unescapestr(value)

                args[key] = value

            if batchedcommands is not None:
                batchedcommands.append((command, args))
                continue

            ui.status(_(b'sending %s command\n') % command)

            if b'PUSHFILE' in args:
                with open(args[b'PUSHFILE'], 'rb') as fh:
                    del args[b'PUSHFILE']
                    res, output = peer._callpush(
                        command, fh, **pycompat.strkwargs(args)
                    )
                    ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
                    ui.status(
                        _(b'remote output: %s\n') % stringutil.escapestr(output)
                    )
            else:
                with peer.commandexecutor() as e:
                    res = e.callcommand(command, args).result()

                if isinstance(res, wireprotov2peer.commandresponse):
                    val = res.objects()
                    ui.status(
                        _(b'response: %s\n')
                        % stringutil.pprint(val, bprefix=True, indent=2)
                    )
                else:
                    ui.status(
                        _(b'response: %s\n')
                        % stringutil.pprint(res, bprefix=True, indent=2)
                    )

        elif action == b'batchbegin':
            if batchedcommands is not None:
                raise error.Abort(_(b'nested batchbegin not allowed'))

            batchedcommands = []
        elif action == b'batchsubmit':
            # There is a batching API we could go through. But it would be
            # difficult to normalize requests into function calls. It is easier
            # to bypass this layer and normalize to commands + args.
            ui.status(
                _(b'sending batch with %d sub-commands\n')
                % len(batchedcommands)
            )
            assert peer is not None
            for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                ui.status(
                    _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
                )

            batchedcommands = None

        elif action.startswith(b'httprequest '):
            if not opener:
                raise error.Abort(
                    _(b'cannot use httprequest without an HTTP peer')
                )

            request = action.split(b' ', 2)
            if len(request) != 3:
                raise error.Abort(
                    _(
                        b'invalid httprequest: expected format is '
                        b'"httprequest <method> <path>'
                    )
                )

            method, httppath = request[1:]
            headers = {}
            body = None
            frames = []
            for line in lines:
                line = line.lstrip()
                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                if m:
                    # Headers need to use native strings.
                    key = pycompat.strurl(m.group(1))
                    value = pycompat.strurl(m.group(2))
                    headers[key] = value
                    continue

                if line.startswith(b'BODYFILE '):
                    # Fix: split() returns a list; open() needs the path
                    # component after the "BODYFILE " prefix, hence [1].
                    with open(line.split(b' ', 1)[1], b'rb') as fh:
                        body = fh.read()
                elif line.startswith(b'frame '):
                    frame = wireprotoframing.makeframefromhumanstring(
                        line[len(b'frame ') :]
                    )

                    frames.append(frame)
                else:
                    raise error.Abort(
                        _(b'unknown argument to httprequest: %s') % line
                    )

            url = path + httppath

            if frames:
                body = b''.join(bytes(f) for f in frames)

            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)

            # urllib.Request insists on using has_data() as a proxy for
            # determining the request method. Override that to use our
            # explicitly requested method.
            req.get_method = lambda: pycompat.sysstr(method)

            try:
                res = opener.open(req)
                body = res.read()
            except util.urlerr.urlerror as e:
                # read() method must be called, but only exists in Python 2
                getattr(e, 'read', lambda: None)()
                continue

            ct = res.headers.get('Content-Type')
            if ct == 'application/mercurial-cbor':
                ui.write(
                    _(b'cbor> %s\n')
                    % stringutil.pprint(
                        cborutil.decodeall(body), bprefix=True, indent=2
                    )
                )

        elif action == b'close':
            assert peer is not None
            peer.close()
        elif action == b'readavailable':
            if not stdout or not stderr:
                raise error.Abort(
                    _(b'readavailable not available on this peer')
                )

            stdin.close()
            stdout.read()
            stderr.read()

        elif action == b'readline':
            if not stdout:
                raise error.Abort(_(b'readline not available on this peer'))
            stdout.readline()
        elif action == b'ereadline':
            if not stderr:
                raise error.Abort(_(b'ereadline not available on this peer'))
            stderr.readline()
        elif action.startswith(b'read '):
            count = int(action.split(b' ', 1)[1])
            if not stdout:
                raise error.Abort(_(b'read not available on this peer'))
            stdout.read(count)
        elif action.startswith(b'eread '):
            count = int(action.split(b' ', 1)[1])
            if not stderr:
                raise error.Abort(_(b'eread not available on this peer'))
            stderr.read(count)
        else:
            raise error.Abort(_(b'unknown action: %s') % action)

    if batchedcommands is not None:
        raise error.Abort(_(b'unclosed "batchbegin" request'))

    if peer:
        peer.close()

    if proc:
        proc.kill()
@@ -1,935 +1,935
1 1 setup
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [extensions]
5 5 > blackbox=
6 6 > mock=$TESTDIR/mockblackbox.py
7 7 > [blackbox]
8 8 > track = command, commandfinish, tagscache
9 9 > EOF
10 10
11 11 Helper functions:
12 12
13 13 $ cacheexists() {
14 14 > [ -f .hg/cache/tags2-visible ] && echo "tag cache exists" || echo "no tag cache"
15 15 > }
16 16
17 17 $ fnodescacheexists() {
18 18 > [ -f .hg/cache/hgtagsfnodes1 ] && echo "fnodes cache exists" || echo "no fnodes cache"
19 19 > }
20 20
21 21 $ dumptags() {
22 22 > rev=$1
23 23 > echo "rev $rev: .hgtags:"
24 24 > hg cat -r$rev .hgtags
25 25 > }
26 26
27 27 # XXX need to test that the tag cache works when we strip an old head
28 28 # and add a new one rooted off non-tip: i.e. node and rev of tip are the
29 29 # same, but stuff has changed behind tip.
30 30
31 31 Setup:
32 32
33 33 $ hg init t
34 34 $ cd t
35 35 $ cacheexists
36 36 no tag cache
37 37 $ fnodescacheexists
38 38 no fnodes cache
39 39 $ hg id
40 40 000000000000 tip
41 41 $ cacheexists
42 42 no tag cache
43 43 $ fnodescacheexists
44 44 no fnodes cache
45 45 $ echo a > a
46 46 $ hg add a
47 47 $ hg commit -m "test"
48 48 $ hg co
49 49 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
50 50 $ hg identify
51 51 acb14030fe0a tip
52 52 $ hg identify -r 'wdir()'
53 53 acb14030fe0a tip
54 54 $ cacheexists
55 55 tag cache exists
56 56 No fnodes cache because .hgtags file doesn't exist
57 57 (this is an implementation detail)
58 58 $ fnodescacheexists
59 59 no fnodes cache
60 60
61 61 Try corrupting the cache
62 62
63 63 $ printf 'a b' > .hg/cache/tags2-visible
64 64 $ hg identify
65 65 acb14030fe0a tip
66 66 $ cacheexists
67 67 tag cache exists
68 68 $ fnodescacheexists
69 69 no fnodes cache
70 70 $ hg identify
71 71 acb14030fe0a tip
72 72
73 73 Create local tag with long name:
74 74
75 75 $ T=`hg identify --debug --id`
76 76 $ hg tag -l "This is a local tag with a really long name!"
77 77 $ hg tags
78 78 tip 0:acb14030fe0a
79 79 This is a local tag with a really long name! 0:acb14030fe0a
80 80 $ rm .hg/localtags
81 81
82 82 Create a tag behind hg's back:
83 83
84 84 $ echo "$T first" > .hgtags
85 85 $ cat .hgtags
86 86 acb14030fe0a21b60322c440ad2d20cf7685a376 first
87 87 $ hg add .hgtags
88 88 $ hg commit -m "add tags"
89 89 $ hg tags
90 90 tip 1:b9154636be93
91 91 first 0:acb14030fe0a
92 92 $ hg identify
93 93 b9154636be93 tip
94 94
95 95 We should have a fnodes cache now that we have a real tag
96 96 The cache should have an empty entry for rev 0 and a valid entry for rev 1.
97 97
98 98
99 99 $ fnodescacheexists
100 100 fnodes cache exists
101 101 $ f --size --hexdump .hg/cache/hgtagsfnodes1
102 102 .hg/cache/hgtagsfnodes1: size=48
103 103 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
104 104 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
105 105 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
106 106 $ hg debugtagscache
107 107 0 acb14030fe0a21b60322c440ad2d20cf7685a376 missing
108 108 1 b9154636be938d3d431e75a7c906504a079bfe07 26b7b4a773e09ee3c52f510e19e05e1ff966d859
109 109
110 110 Repeat with cold tag cache:
111 111
112 112 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
113 113 $ hg identify
114 114 b9154636be93 tip
115 115
116 116 $ fnodescacheexists
117 117 fnodes cache exists
118 118 $ f --size --hexdump .hg/cache/hgtagsfnodes1
119 119 .hg/cache/hgtagsfnodes1: size=48
120 120 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
121 121 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
122 122 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
123 123
124 124 And again, but now unable to write tag cache or lock file:
125 125
126 126 #if unix-permissions no-fsmonitor
127 127
128 128 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
129 129 $ chmod 555 .hg/cache
130 130 $ hg identify
131 131 b9154636be93 tip
132 132 $ chmod 755 .hg/cache
133 133
134 134 (this block should be protected by no-fsmonitor, because "chmod 555 .hg"
135 135 makes watchman fail to access files under .hg)
136 136
137 137 $ chmod 555 .hg
138 138 $ hg identify
139 139 b9154636be93 tip
140 140 $ chmod 755 .hg
141 141 #endif
142 142
143 143 Tag cache debug info written to blackbox log
144 144
145 145 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
146 146 $ hg identify
147 147 b9154636be93 tip
148 148 $ hg blackbox -l 6
149 149 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
150 150 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing 48 bytes to cache/hgtagsfnodes1
151 151 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
152 152 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
153 153 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
154 154 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
155 155
156 156 Failure to acquire lock results in no write
157 157
158 158 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
159 159 $ echo 'foo:1' > .hg/store/lock
160 160 $ hg identify
161 161 b9154636be93 tip
162 162 $ hg blackbox -l 6
163 163 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
164 164 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> not writing .hg/cache/hgtagsfnodes1 because lock cannot be acquired
165 165 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
166 166 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
167 167 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
168 168 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
169 169
170 170 $ fnodescacheexists
171 171 no fnodes cache
172 172
173 173 $ rm .hg/store/lock
174 174
175 175 $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1
176 176 $ hg identify
177 177 b9154636be93 tip
178 178
179 179 Create a branch:
180 180
181 181 $ echo bb > a
182 182 $ hg status
183 183 M a
184 184 $ hg identify
185 185 b9154636be93+ tip
186 186 $ hg co first
187 187 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
188 188 $ hg id
189 189 acb14030fe0a+ first
190 190 $ hg id -r 'wdir()'
191 191 acb14030fe0a+ first
192 192 $ hg -v id
193 193 acb14030fe0a+ first
194 194 $ hg status
195 195 M a
196 196 $ echo 1 > b
197 197 $ hg add b
198 198 $ hg commit -m "branch"
199 199 created new head
200 200
201 201 Creating a new commit shouldn't append the .hgtags fnodes cache until
202 202 tags info is accessed
203 203
204 204 $ f --size --hexdump .hg/cache/hgtagsfnodes1
205 205 .hg/cache/hgtagsfnodes1: size=48
206 206 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
207 207 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
208 208 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
209 209
210 210 $ hg id
211 211 c8edf04160c7 tip
212 212
213 213 First 4 bytes of record 3 are changeset fragment
214 214
215 215 $ f --size --hexdump .hg/cache/hgtagsfnodes1
216 216 .hg/cache/hgtagsfnodes1: size=72
217 217 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
218 218 0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
219 219 0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
220 220 0030: c8 ed f0 41 00 00 00 00 00 00 00 00 00 00 00 00 |...A............|
221 221 0040: 00 00 00 00 00 00 00 00 |........|
222 222
223 223 Merge the two heads:
224 224
225 225 $ hg merge 1
226 226 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
227 227 (branch merge, don't forget to commit)
228 228 $ hg blackbox -l3
229 229 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28 (5000)> merge 1
230 230 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> merge 1 exited 0 after * seconds (glob)
231 231 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l3
232 232 $ hg id
233 233 c8edf04160c7+b9154636be93+ tip
234 234 $ hg status
235 235 M .hgtags
236 236 $ hg commit -m "merge"
237 237
238 238 Create a fake head, make sure tag not visible afterwards:
239 239
240 240 $ cp .hgtags tags
241 241 $ hg tag last
242 242 $ hg rm .hgtags
243 243 $ hg commit -m "remove"
244 244
245 245 $ mv tags .hgtags
246 246 $ hg add .hgtags
247 247 $ hg commit -m "readd"
248 248 $
249 249 $ hg tags
250 250 tip 6:35ff301afafe
251 251 first 0:acb14030fe0a
252 252
253 253 Add invalid tags:
254 254
255 255 $ echo "spam" >> .hgtags
256 256 $ echo >> .hgtags
257 257 $ echo "foo bar" >> .hgtags
258 258 $ echo "a5a5 invalid" >> .hg/localtags
259 259 $ cat .hgtags
260 260 acb14030fe0a21b60322c440ad2d20cf7685a376 first
261 261 spam
262 262
263 263 foo bar
264 264 $ hg commit -m "tags"
265 265
266 266 Report tag parse error on other head:
267 267
268 268 $ hg up 3
269 269 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
270 270 $ echo 'x y' >> .hgtags
271 271 $ hg commit -m "head"
272 272 created new head
273 273
274 274 $ hg tags --debug
275 275 .hgtags@75d9f02dfe28, line 2: cannot parse entry
276 276 .hgtags@75d9f02dfe28, line 4: node 'foo' is not well formed
277 277 .hgtags@c4be69a18c11, line 2: node 'x' is not well formed
278 278 tip 8:c4be69a18c11e8bc3a5fdbb576017c25f7d84663
279 279 first 0:acb14030fe0a21b60322c440ad2d20cf7685a376
280 280 $ hg tip
281 281 changeset: 8:c4be69a18c11
282 282 tag: tip
283 283 parent: 3:ac5e980c4dc0
284 284 user: test
285 285 date: Thu Jan 01 00:00:00 1970 +0000
286 286 summary: head
287 287
288 288
289 289 Test tag precedence rules:
290 290
291 291 $ cd ..
292 292 $ hg init t2
293 293 $ cd t2
294 294 $ echo foo > foo
295 295 $ hg add foo
296 296 $ hg ci -m 'add foo' # rev 0
297 297 $ hg tag bar # rev 1
298 298 $ echo >> foo
299 299 $ hg ci -m 'change foo 1' # rev 2
300 300 $ hg up -C 1
301 301 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
302 302 $ hg tag -r 1 -f bar # rev 3
303 303 $ hg up -C 1
304 304 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
305 305 $ echo >> foo
306 306 $ hg ci -m 'change foo 2' # rev 4
307 307 created new head
308 308 $ hg tags
309 309 tip 4:0c192d7d5e6b
310 310 bar 1:78391a272241
311 311
312 312 Repeat in case of cache effects:
313 313
314 314 $ hg tags
315 315 tip 4:0c192d7d5e6b
316 316 bar 1:78391a272241
317 317
318 318 Detailed dump of tag info:
319 319
320 320 $ hg heads -q # expect 4, 3, 2
321 321 4:0c192d7d5e6b
322 322 3:6fa450212aeb
323 323 2:7a94127795a3
324 324 $ dumptags 2
325 325 rev 2: .hgtags:
326 326 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
327 327 $ dumptags 3
328 328 rev 3: .hgtags:
329 329 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
330 330 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
331 331 78391a272241d70354aa14c874552cad6b51bb42 bar
332 332 $ dumptags 4
333 333 rev 4: .hgtags:
334 334 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
335 335
336 336 Dump cache:
337 337
338 338 $ cat .hg/cache/tags2-visible
339 339 4 0c192d7d5e6b78a714de54a2e9627952a877e25a
340 340 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
341 341 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
342 342 78391a272241d70354aa14c874552cad6b51bb42 bar
343 343
344 344 $ f --size --hexdump .hg/cache/hgtagsfnodes1
345 345 .hg/cache/hgtagsfnodes1: size=120
346 346 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
347 347 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
348 348 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
349 349 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
350 350 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
351 351 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
352 352 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
353 353 0070: 78 ee 5a 2d ad bc 94 3d |x.Z-...=|
354 354
355 355 Corrupt the .hgtags fnodes cache
356 356 Extra junk data at the end should get overwritten on next cache update
357 357
358 358 $ echo extra >> .hg/cache/hgtagsfnodes1
359 359 $ echo dummy1 > foo
360 360 $ hg commit -m throwaway1
361 361
362 362 $ hg tags
363 363 tip 5:8dbfe60eff30
364 364 bar 1:78391a272241
365 365
366 366 $ hg blackbox -l 6
367 367 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags
368 368 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing 24 bytes to cache/hgtagsfnodes1
369 369 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> 3/4 cache hits/lookups in * seconds (glob)
370 370 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing .hg/cache/tags2-visible with 1 tags
371 371 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags exited 0 after * seconds (glob)
372 372 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> blackbox -l 6
373 373
374 374 On junk data + missing cache entries, hg also overwrites the junk.
375 375
376 376 $ rm -f .hg/cache/tags2-visible
377 377 >>> import os
378 378 >>> with open(".hg/cache/hgtagsfnodes1", "ab+") as fp:
379 379 ... fp.seek(-10, os.SEEK_END) and None
380 380 ... fp.truncate() and None
381 381
382 382 $ hg debugtagscache | tail -2
383 383 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
384 384 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 missing
385 385 $ hg tags
386 386 tip 5:8dbfe60eff30
387 387 bar 1:78391a272241
388 388 $ hg debugtagscache | tail -2
389 389 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
390 390 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8af31de17fab7422878ee5a2dadbc943d
391 391
392 392 If the 4 bytes of node hash for a record don't match an existing node, the entry
393 393 is flagged as invalid.
394 394
395 395 >>> import os
396 396 >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
397 397 ... fp.seek(-24, os.SEEK_END) and None
398 398 ... fp.write(b'\xde\xad') and None
399 399
400 400 $ f --size --hexdump .hg/cache/hgtagsfnodes1
401 401 .hg/cache/hgtagsfnodes1: size=144
402 402 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
403 403 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
404 404 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
405 405 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
406 406 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
407 407 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
408 408 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
409 409 0070: 78 ee 5a 2d ad bc 94 3d de ad e6 0e 0c 04 f2 a8 |x.Z-...=........|
410 410 0080: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
411 411
412 412 $ hg debugtagscache | tail -2
413 413 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
414 414 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 invalid
415 415
416 416 $ hg tags
417 417 tip 5:8dbfe60eff30
418 418 bar 1:78391a272241
419 419
420 420 BUG: If the filenode part of an entry in hgtagsfnodes is corrupt and
421 421 tags2-visible is missing, `hg tags` aborts. Corrupting the leading 4 bytes of
422 422 node hash (as above) doesn't seem to trigger the issue. Also note that the
423 423 debug command hides the corruption, both with and without tags2-visible.
424 424
425 425 $ mv .hg/cache/hgtagsfnodes1 .hg/cache/hgtagsfnodes1.bak
426 426 $ hg debugupdatecaches
427 427
428 428 >>> import os
429 429 >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
430 430 ... fp.seek(-16, os.SEEK_END) and None
431 431 ... fp.write(b'\xde\xad') and None
432 432
433 433 $ f --size --hexdump .hg/cache/hgtagsfnodes1
434 434 .hg/cache/hgtagsfnodes1: size=144
435 435 0000: bb d1 79 df 00 00 00 00 00 00 00 00 00 00 00 00 |..y.............|
436 436 0010: 00 00 00 00 00 00 00 00 78 39 1a 27 0c 04 f2 a8 |........x9.'....|
437 437 0020: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
438 438 0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
439 439 0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
440 440 0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
441 441 0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
442 442 0070: 78 ee 5a 2d ad bc 94 3d 8d bf e6 0e 0c 04 f2 a8 |x.Z-...=........|
443 443 0080: de ad de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |......B(x.Z-...=|
444 444
445 445 $ hg debugtagscache | tail -2
446 446 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
447 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d
447 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
448 448
449 449 $ rm -f .hg/cache/tags2-visible
450 450 $ hg debugtagscache | tail -2
451 451 4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
452 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d
452 5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
453 453
454 454 $ hg tags
455 455 abort: data/.hgtags.i@0c04f2a8deadde17fab7422878ee5a2dadbc943d: no match found
456 456 [50]
457 457
458 458 BUG: Unless this file is restored, the `hg tags` in the next unix-permissions
459 459 conditional will fail: "abort: data/.hgtags.i@0c04f2a8dead: no match found"
460 460
461 461 $ mv .hg/cache/hgtagsfnodes1.bak .hg/cache/hgtagsfnodes1
462 462
463 463 #if unix-permissions no-root
464 464 Errors writing to .hgtags fnodes cache are silently ignored
465 465
466 466 $ echo dummy2 > foo
467 467 $ hg commit -m throwaway2
468 468
469 469 $ chmod a-w .hg/cache/hgtagsfnodes1
470 470 $ rm -f .hg/cache/tags2-visible
471 471
472 472 $ hg tags
473 473 tip 6:b968051b5cf3
474 474 bar 1:78391a272241
475 475
476 476 $ hg blackbox -l 6
477 477 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
478 478 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> couldn't write cache/hgtagsfnodes1: [Errno *] * (glob)
479 479 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
480 480 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
481 481 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
482 482 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
483 483
484 484 $ chmod a+w .hg/cache/hgtagsfnodes1
485 485
486 486 $ rm -f .hg/cache/tags2-visible
487 487 $ hg tags
488 488 tip 6:b968051b5cf3
489 489 bar 1:78391a272241
490 490
491 491 $ hg blackbox -l 6
492 492 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
493 493 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing 24 bytes to cache/hgtagsfnodes1
494 494 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
495 495 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
496 496 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
497 497 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
498 498
499 499 $ f --size .hg/cache/hgtagsfnodes1
500 500 .hg/cache/hgtagsfnodes1: size=168
501 501
502 502 $ hg -q --config extensions.strip= strip -r 6 --no-backup
503 503 #endif
504 504
505 505 Stripping doesn't truncate the tags cache until new data is available
506 506
507 507 $ rm -f .hg/cache/hgtagsfnodes1 .hg/cache/tags2-visible
508 508 $ hg tags
509 509 tip 5:8dbfe60eff30
510 510 bar 1:78391a272241
511 511
512 512 $ f --size .hg/cache/hgtagsfnodes1
513 513 .hg/cache/hgtagsfnodes1: size=144
514 514
515 515 $ hg -q --config extensions.strip= strip -r 5 --no-backup
516 516 $ hg tags
517 517 tip 4:0c192d7d5e6b
518 518 bar 1:78391a272241
519 519
520 520 $ hg blackbox -l 5
521 521 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing 24 bytes to cache/hgtagsfnodes1
522 522 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> 2/4 cache hits/lookups in * seconds (glob)
523 523 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing .hg/cache/tags2-visible with 1 tags
524 524 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> tags exited 0 after * seconds (glob)
525 525 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> blackbox -l 5
526 526
527 527 $ f --size .hg/cache/hgtagsfnodes1
528 528 .hg/cache/hgtagsfnodes1: size=120
529 529
530 530 $ echo dummy > foo
531 531 $ hg commit -m throwaway3
532 532
533 533 $ hg tags
534 534 tip 5:035f65efb448
535 535 bar 1:78391a272241
536 536
537 537 $ hg blackbox -l 6
538 538 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags
539 539 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing 24 bytes to cache/hgtagsfnodes1
540 540 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> 3/4 cache hits/lookups in * seconds (glob)
541 541 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing .hg/cache/tags2-visible with 1 tags
542 542 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags exited 0 after * seconds (glob)
543 543 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> blackbox -l 6
544 544 $ f --size .hg/cache/hgtagsfnodes1
545 545 .hg/cache/hgtagsfnodes1: size=144
546 546
547 547 $ hg -q --config extensions.strip= strip -r 5 --no-backup
548 548
549 549 Test tag removal:
550 550
551 551 $ hg tag --remove bar # rev 5
552 552 $ hg tip -vp
553 553 changeset: 5:5f6e8655b1c7
554 554 tag: tip
555 555 user: test
556 556 date: Thu Jan 01 00:00:00 1970 +0000
557 557 files: .hgtags
558 558 description:
559 559 Removed tag bar
560 560
561 561
562 562 diff -r 0c192d7d5e6b -r 5f6e8655b1c7 .hgtags
563 563 --- a/.hgtags Thu Jan 01 00:00:00 1970 +0000
564 564 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
565 565 @@ -1,1 +1,3 @@
566 566 bbd179dfa0a71671c253b3ae0aa1513b60d199fa bar
567 567 +78391a272241d70354aa14c874552cad6b51bb42 bar
568 568 +0000000000000000000000000000000000000000 bar
569 569
570 570 $ hg tags
571 571 tip 5:5f6e8655b1c7
572 572 $ hg tags # again, try to expose cache bugs
573 573 tip 5:5f6e8655b1c7
574 574
575 575 Remove nonexistent tag:
576 576
577 577 $ hg tag --remove foobar
578 578 abort: tag 'foobar' does not exist
579 579 [10]
580 580 $ hg tip
581 581 changeset: 5:5f6e8655b1c7
582 582 tag: tip
583 583 user: test
584 584 date: Thu Jan 01 00:00:00 1970 +0000
585 585 summary: Removed tag bar
586 586
587 587
588 588 Undo a tag with rollback:
589 589
590 590 $ hg rollback # destroy rev 5 (restore bar)
591 591 repository tip rolled back to revision 4 (undo commit)
592 592 working directory now based on revision 4
593 593 $ hg tags
594 594 tip 4:0c192d7d5e6b
595 595 bar 1:78391a272241
596 596 $ hg tags
597 597 tip 4:0c192d7d5e6b
598 598 bar 1:78391a272241
599 599
600 600 Test tag rank:
601 601
602 602 $ cd ..
603 603 $ hg init t3
604 604 $ cd t3
605 605 $ echo foo > foo
606 606 $ hg add foo
607 607 $ hg ci -m 'add foo' # rev 0
608 608 $ hg tag -f bar # rev 1 bar -> 0
609 609 $ hg tag -f bar # rev 2 bar -> 1
610 610 $ hg tag -fr 0 bar # rev 3 bar -> 0
611 611 $ hg tag -fr 1 bar # rev 4 bar -> 1
612 612 $ hg tag -fr 0 bar # rev 5 bar -> 0
613 613 $ hg tags
614 614 tip 5:85f05169d91d
615 615 bar 0:bbd179dfa0a7
616 616 $ hg co 3
617 617 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
618 618 $ echo barbar > foo
619 619 $ hg ci -m 'change foo' # rev 6
620 620 created new head
621 621 $ hg tags
622 622 tip 6:735c3ca72986
623 623 bar 0:bbd179dfa0a7
624 624
625 625 Don't allow moving tag without -f:
626 626
627 627 $ hg tag -r 3 bar
628 628 abort: tag 'bar' already exists (use -f to force)
629 629 [10]
630 630 $ hg tags
631 631 tip 6:735c3ca72986
632 632 bar 0:bbd179dfa0a7
633 633
634 634 Strip 1: expose an old head:
635 635
636 636 $ hg --config extensions.mq= strip 5
637 637 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
638 638 $ hg tags # partly stale cache
639 639 tip 5:735c3ca72986
640 640 bar 1:78391a272241
641 641 $ hg tags # up-to-date cache
642 642 tip 5:735c3ca72986
643 643 bar 1:78391a272241
644 644
645 645 Strip 2: destroy whole branch, no old head exposed
646 646
647 647 $ hg --config extensions.mq= strip 4
648 648 saved backup bundle to $TESTTMP/t3/.hg/strip-backup/*-backup.hg (glob)
649 649 $ hg tags # partly stale
650 650 tip 4:735c3ca72986
651 651 bar 0:bbd179dfa0a7
652 652 $ rm -f .hg/cache/tags2-visible
653 653 $ hg tags # cold cache
654 654 tip 4:735c3ca72986
655 655 bar 0:bbd179dfa0a7
656 656
657 657 Test tag rank with 3 heads:
658 658
659 659 $ cd ..
660 660 $ hg init t4
661 661 $ cd t4
662 662 $ echo foo > foo
663 663 $ hg add
664 664 adding foo
665 665 $ hg ci -m 'add foo' # rev 0
666 666 $ hg tag bar # rev 1 bar -> 0
667 667 $ hg tag -f bar # rev 2 bar -> 1
668 668 $ hg up -qC 0
669 669 $ hg tag -fr 2 bar # rev 3 bar -> 2
670 670 $ hg tags
671 671 tip 3:197c21bbbf2c
672 672 bar 2:6fa450212aeb
673 673 $ hg up -qC 0
674 674 $ hg tag -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
675 675
676 676 Bar should still point to rev 2:
677 677
678 678 $ hg tags
679 679 tip 4:3b4b14ed0202
680 680 bar 2:6fa450212aeb
681 681
682 682 Test that removing global/local tags does not get confused when trying
683 683 to remove a tag of type X that actually only exists as a tag of type Y:
684 684
685 685 $ cd ..
686 686 $ hg init t5
687 687 $ cd t5
688 688 $ echo foo > foo
689 689 $ hg add
690 690 adding foo
691 691 $ hg ci -m 'add foo' # rev 0
692 692
693 693 $ hg tag -r 0 -l localtag
694 694 $ hg tag --remove localtag
695 695 abort: tag 'localtag' is not a global tag
696 696 [10]
697 697 $
698 698 $ hg tag -r 0 globaltag
699 699 $ hg tag --remove -l globaltag
700 700 abort: tag 'globaltag' is not a local tag
701 701 [10]
702 702 $ hg tags -v
703 703 tip 1:a0b6fe111088
704 704 localtag 0:bbd179dfa0a7 local
705 705 globaltag 0:bbd179dfa0a7
706 706
707 707 Templated output:
708 708
709 709 (immediate values)
710 710
711 711 $ hg tags -T '{pad(tag, 9)} {rev}:{node} ({type})\n'
712 712 tip 1:a0b6fe111088c8c29567d3876cc466aa02927cae ()
713 713 localtag 0:bbd179dfa0a71671c253b3ae0aa1513b60d199fa (local)
714 714 globaltag 0:bbd179dfa0a71671c253b3ae0aa1513b60d199fa ()
715 715
716 716 (ctx/revcache dependent)
717 717
718 718 $ hg tags -T '{pad(tag, 9)} {rev} {file_adds}\n'
719 719 tip 1 .hgtags
720 720 localtag 0 foo
721 721 globaltag 0 foo
722 722
723 723 $ hg tags -T '{pad(tag, 9)} {rev}:{node|shortest}\n'
724 724 tip 1:a0b6
725 725 localtag 0:bbd1
726 726 globaltag 0:bbd1
727 727
728 728 Test for issue3911
729 729
730 730 $ hg tag -r 0 -l localtag2
731 731 $ hg tag -l --remove localtag2
732 732 $ hg tags -v
733 733 tip 1:a0b6fe111088
734 734 localtag 0:bbd179dfa0a7 local
735 735 globaltag 0:bbd179dfa0a7
736 736
737 737 $ hg tag -r 1 -f localtag
738 738 $ hg tags -v
739 739 tip 2:5c70a037bb37
740 740 localtag 1:a0b6fe111088
741 741 globaltag 0:bbd179dfa0a7
742 742
743 743 $ hg tags -v
744 744 tip 2:5c70a037bb37
745 745 localtag 1:a0b6fe111088
746 746 globaltag 0:bbd179dfa0a7
747 747
748 748 $ hg tag -r 1 localtag2
749 749 $ hg tags -v
750 750 tip 3:bbfb8cd42be2
751 751 localtag2 1:a0b6fe111088
752 752 localtag 1:a0b6fe111088
753 753 globaltag 0:bbd179dfa0a7
754 754
755 755 $ hg tags -v
756 756 tip 3:bbfb8cd42be2
757 757 localtag2 1:a0b6fe111088
758 758 localtag 1:a0b6fe111088
759 759 globaltag 0:bbd179dfa0a7
760 760
761 761 $ cd ..
762 762
763 763 Create a repository with tags data to test .hgtags fnodes transfer
764 764
765 765 $ hg init tagsserver
766 766 $ cd tagsserver
767 767 $ touch foo
768 768 $ hg -q commit -A -m initial
769 769 $ hg tag -m 'tag 0.1' 0.1
770 770 $ echo second > foo
771 771 $ hg commit -m second
772 772 $ hg tag -m 'tag 0.2' 0.2
773 773 $ hg tags
774 774 tip 3:40f0358cb314
775 775 0.2 2:f63cc8fe54e4
776 776 0.1 0:96ee1d7354c4
777 777 $ cd ..
778 778
779 779 Cloning should pull down hgtags fnodes mappings and write the cache file
780 780
781 781 $ hg clone --pull tagsserver tagsclient
782 782 requesting all changes
783 783 adding changesets
784 784 adding manifests
785 785 adding file changes
786 786 added 4 changesets with 4 changes to 2 files
787 787 new changesets 96ee1d7354c4:40f0358cb314
788 788 updating to branch default
789 789 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
790 790
791 791 Missing tags2* files means the cache wasn't written through the normal mechanism.
792 792
793 793 $ ls tagsclient/.hg/cache
794 794 branch2-base
795 795 branch2-immutable
796 796 branch2-served
797 797 branch2-served.hidden
798 798 branch2-visible
799 799 branch2-visible-hidden
800 800 hgtagsfnodes1
801 801 rbc-names-v1
802 802 rbc-revs-v1
803 803 tags2
804 804 tags2-served
805 805
806 806 Cache should contain the head only, even though other nodes have tags data
807 807
808 808 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
809 809 tagsclient/.hg/cache/hgtagsfnodes1: size=96
810 810 0000: 96 ee 1d 73 00 00 00 00 00 00 00 00 00 00 00 00 |...s............|
811 811 0010: 00 00 00 00 00 00 00 00 c4 da b0 c2 94 65 e1 c6 |.............e..|
812 812 0020: 0d f7 f0 dd 32 04 ea 57 78 c8 97 97 79 fc d5 95 |....2..Wx...y...|
813 813 0030: f6 3c c8 fe 94 65 e1 c6 0d f7 f0 dd 32 04 ea 57 |.<...e......2..W|
814 814 0040: 78 c8 97 97 79 fc d5 95 40 f0 35 8c 19 e0 a7 d3 |x...y...@.5.....|
815 815 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
816 816
817 817 Running hg tags should produce tags2* file and not change cache
818 818
819 819 $ hg -R tagsclient tags
820 820 tip 3:40f0358cb314
821 821 0.2 2:f63cc8fe54e4
822 822 0.1 0:96ee1d7354c4
823 823
824 824 $ ls tagsclient/.hg/cache
825 825 branch2-base
826 826 branch2-immutable
827 827 branch2-served
828 828 branch2-served.hidden
829 829 branch2-visible
830 830 branch2-visible-hidden
831 831 hgtagsfnodes1
832 832 rbc-names-v1
833 833 rbc-revs-v1
834 834 tags2
835 835 tags2-served
836 836 tags2-visible
837 837
838 838 $ f --size --hexdump tagsclient/.hg/cache/hgtagsfnodes1
839 839 tagsclient/.hg/cache/hgtagsfnodes1: size=96
840 840 0000: 96 ee 1d 73 00 00 00 00 00 00 00 00 00 00 00 00 |...s............|
841 841 0010: 00 00 00 00 00 00 00 00 c4 da b0 c2 94 65 e1 c6 |.............e..|
842 842 0020: 0d f7 f0 dd 32 04 ea 57 78 c8 97 97 79 fc d5 95 |....2..Wx...y...|
843 843 0030: f6 3c c8 fe 94 65 e1 c6 0d f7 f0 dd 32 04 ea 57 |.<...e......2..W|
844 844 0040: 78 c8 97 97 79 fc d5 95 40 f0 35 8c 19 e0 a7 d3 |x...y...@.5.....|
845 845 0050: 8a 5c 6a 82 4d cf fb a5 87 d0 2f a3 1e 4f 2f 8a |.\j.M...../..O/.|
846 846
847 847 Check that the bundle includes cache data
848 848
849 849 $ hg -R tagsclient bundle --all ./test-cache-in-bundle-all-rev.hg
850 850 4 changesets found
851 851 $ hg debugbundle ./test-cache-in-bundle-all-rev.hg
852 852 Stream params: {Compression: BZ}
853 853 changegroup -- {nbchanges: 4, version: 02} (mandatory: True)
854 854 96ee1d7354c4ad7372047672c36a1f561e3a6a4c
855 855 c4dab0c2fd337eb9191f80c3024830a4889a8f34
856 856 f63cc8fe54e4d326f8d692805d70e092f851ddb1
857 857 40f0358cb314c824a5929ee527308d90e023bc10
858 858 hgtagsfnodes -- {} (mandatory: True)
859 859 cache:rev-branch-cache -- {} (mandatory: False)
860 860
861 861 Check that a local clone includes cache data
862 862
863 863 $ hg clone tagsclient tags-local-clone
864 864 updating to branch default
865 865 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
866 866 $ (cd tags-local-clone/.hg/cache/; ls -1 tag*)
867 867 tags2
868 868 tags2-served
869 869 tags2-visible
870 870
871 871 Avoid writing logs when trying to delete an already-deleted tag
872 872 $ hg init issue5752
873 873 $ cd issue5752
874 874 $ echo > a
875 875 $ hg commit -Am 'add a'
876 876 adding a
877 877 $ hg tag a
878 878 $ hg tags
879 879 tip 1:bd7ee4f3939b
880 880 a 0:a8a82d372bb3
881 881 $ hg log
882 882 changeset: 1:bd7ee4f3939b
883 883 tag: tip
884 884 user: test
885 885 date: Thu Jan 01 00:00:00 1970 +0000
886 886 summary: Added tag a for changeset a8a82d372bb3
887 887
888 888 changeset: 0:a8a82d372bb3
889 889 tag: a
890 890 user: test
891 891 date: Thu Jan 01 00:00:00 1970 +0000
892 892 summary: add a
893 893
894 894 $ hg tag --remove a
895 895 $ hg log
896 896 changeset: 2:e7feacc7ec9e
897 897 tag: tip
898 898 user: test
899 899 date: Thu Jan 01 00:00:00 1970 +0000
900 900 summary: Removed tag a
901 901
902 902 changeset: 1:bd7ee4f3939b
903 903 user: test
904 904 date: Thu Jan 01 00:00:00 1970 +0000
905 905 summary: Added tag a for changeset a8a82d372bb3
906 906
907 907 changeset: 0:a8a82d372bb3
908 908 user: test
909 909 date: Thu Jan 01 00:00:00 1970 +0000
910 910 summary: add a
911 911
912 912 $ hg tag --remove a
913 913 abort: tag 'a' is already removed
914 914 [10]
915 915 $ hg log
916 916 changeset: 2:e7feacc7ec9e
917 917 tag: tip
918 918 user: test
919 919 date: Thu Jan 01 00:00:00 1970 +0000
920 920 summary: Removed tag a
921 921
922 922 changeset: 1:bd7ee4f3939b
923 923 user: test
924 924 date: Thu Jan 01 00:00:00 1970 +0000
925 925 summary: Added tag a for changeset a8a82d372bb3
926 926
927 927 changeset: 0:a8a82d372bb3
928 928 user: test
929 929 date: Thu Jan 01 00:00:00 1970 +0000
930 930 summary: add a
931 931
932 932 $ cat .hgtags
933 933 a8a82d372bb35b42ff736e74f07c23bcd99c371f a
934 934 a8a82d372bb35b42ff736e74f07c23bcd99c371f a
935 935 0000000000000000000000000000000000000000 a
General Comments 0
You need to be logged in to leave comments. Login now