dirstate: Change debug_iter() to yield tuples instead of DirstateItem...
Simon Sapin -
r48836:cedfe260 default
@@ -1,4941 +1,4938 @@
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import binascii
11 11 import codecs
12 12 import collections
13 13 import contextlib
14 14 import difflib
15 15 import errno
16 16 import glob
17 17 import operator
18 18 import os
19 19 import platform
20 20 import random
21 21 import re
22 22 import socket
23 23 import ssl
24 24 import stat
25 25 import string
26 26 import subprocess
27 27 import sys
28 28 import time
29 29
30 30 from .i18n import _
31 31 from .node import (
32 32 bin,
33 33 hex,
34 34 nullrev,
35 35 short,
36 36 )
37 37 from .pycompat import (
38 38 getattr,
39 39 open,
40 40 )
41 41 from . import (
42 42 bundle2,
43 43 bundlerepo,
44 44 changegroup,
45 45 cmdutil,
46 46 color,
47 47 context,
48 48 copies,
49 49 dagparser,
50 50 encoding,
51 51 error,
52 52 exchange,
53 53 extensions,
54 54 filemerge,
55 55 filesetlang,
56 56 formatter,
57 57 hg,
58 58 httppeer,
59 59 localrepo,
60 60 lock as lockmod,
61 61 logcmdutil,
62 62 mergestate as mergestatemod,
63 63 metadata,
64 64 obsolete,
65 65 obsutil,
66 66 pathutil,
67 67 phases,
68 68 policy,
69 69 pvec,
70 70 pycompat,
71 71 registrar,
72 72 repair,
73 73 repoview,
74 74 requirements,
75 75 revlog,
76 76 revset,
77 77 revsetlang,
78 78 scmutil,
79 79 setdiscovery,
80 80 simplemerge,
81 81 sshpeer,
82 82 sslutil,
83 83 streamclone,
84 84 strip,
85 85 tags as tagsmod,
86 86 templater,
87 87 treediscovery,
88 88 upgrade,
89 89 url as urlmod,
90 90 util,
91 91 vfs as vfsmod,
92 92 wireprotoframing,
93 93 wireprotoserver,
94 94 wireprotov2peer,
95 95 )
96 96 from .interfaces import repository
97 97 from .utils import (
98 98 cborutil,
99 99 compression,
100 100 dateutil,
101 101 procutil,
102 102 stringutil,
103 103 urlutil,
104 104 )
105 105
106 106 from .revlogutils import (
107 107 deltas as deltautil,
108 108 nodemap,
109 109 rewrite,
110 110 sidedata,
111 111 )
112 112
113 113 release = lockmod.release
114 114
115 115 table = {}
116 116 table.update(strip.command._table)
117 117 command = registrar.command(table)
118 118
119 119
120 120 @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
121 121 def debugancestor(ui, repo, *args):
122 122 """find the ancestor revision of two revisions in a given index"""
123 123 if len(args) == 3:
124 124 index, rev1, rev2 = args
125 125 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
126 126 lookup = r.lookup
127 127 elif len(args) == 2:
128 128 if not repo:
129 129 raise error.Abort(
130 130 _(b'there is no Mercurial repository here (.hg not found)')
131 131 )
132 132 rev1, rev2 = args
133 133 r = repo.changelog
134 134 lookup = repo.lookup
135 135 else:
136 136 raise error.Abort(_(b'either two or three arguments required'))
137 137 a = r.ancestor(lookup(rev1), lookup(rev2))
138 138 ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
139 139
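As a usage sketch (arguments are placeholders; the index-file form assumes the conventional `.hg/store/00changelog.i` path), the command prints the ancestor in the `rev:hex` form written above:

```
$ hg debugancestor REV1 REV2                          # use the current repo's changelog
$ hg debugancestor .hg/store/00changelog.i REV1 REV2  # use an explicit revlog index
```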
140 140
141 141 @command(b'debugantivirusrunning', [])
142 142 def debugantivirusrunning(ui, repo):
143 143 """attempt to trigger an antivirus scanner to see if one is active"""
144 144 with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
145 145 f.write(
146 146 util.b85decode(
147 147 # This is a base85-armored version of the EICAR test file. See
148 148 # https://en.wikipedia.org/wiki/EICAR_test_file for details.
149 149 b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
150 150 b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
151 151 )
152 152 )
153 153 # Give an AV engine time to scan the file.
154 154 time.sleep(2)
155 155 util.unlink(repo.cachevfs.join('eicar-test-file.com'))
156 156
157 157
158 158 @command(b'debugapplystreamclonebundle', [], b'FILE')
159 159 def debugapplystreamclonebundle(ui, repo, fname):
160 160 """apply a stream clone bundle file"""
161 161 f = hg.openpath(ui, fname)
162 162 gen = exchange.readbundle(ui, f, fname)
163 163 gen.apply(repo)
164 164
165 165
166 166 @command(
167 167 b'debugbuilddag',
168 168 [
169 169 (
170 170 b'm',
171 171 b'mergeable-file',
172 172 None,
173 173 _(b'add single file mergeable changes'),
174 174 ),
175 175 (
176 176 b'o',
177 177 b'overwritten-file',
178 178 None,
179 179 _(b'add single file all revs overwrite'),
180 180 ),
181 181 (b'n', b'new-file', None, _(b'add new file at each rev')),
182 182 ],
183 183 _(b'[OPTION]... [TEXT]'),
184 184 )
185 185 def debugbuilddag(
186 186 ui,
187 187 repo,
188 188 text=None,
189 189 mergeable_file=False,
190 190 overwritten_file=False,
191 191 new_file=False,
192 192 ):
193 193 """builds a repo with a given DAG from scratch in the current empty repo
194 194
195 195 The description of the DAG is read from stdin if not given on the
196 196 command line.
197 197
198 198 Elements:
199 199
200 200 - "+n" is a linear run of n nodes based on the current default parent
201 201 - "." is a single node based on the current default parent
202 202 - "$" resets the default parent to null (implied at the start);
203 203 otherwise the default parent is always the last node created
204 204 - "<p" sets the default parent to the backref p
205 205 - "*p" is a fork at parent p, which is a backref
206 206 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
207 207 - "/p2" is a merge of the preceding node and p2
208 208 - ":tag" defines a local tag for the preceding node
209 209 - "@branch" sets the named branch for subsequent nodes
210 210 - "#...\\n" is a comment up to the end of the line
211 211
212 212 Whitespace between the above elements is ignored.
213 213
214 214 A backref is either
215 215
216 216 - a number n, which references the node curr-n, where curr is the current
217 217 node, or
218 218 - the name of a local tag you placed earlier using ":tag", or
219 219 - empty to denote the default parent.
220 220
221 221 All string-valued elements are either strictly alphanumeric, or must
222 222 be enclosed in double quotes ("..."), with "\\" as escape character.
223 223 """
224 224
225 225 if text is None:
226 226 ui.status(_(b"reading DAG from stdin\n"))
227 227 text = ui.fin.read()
228 228
229 229 cl = repo.changelog
230 230 if len(cl) > 0:
231 231 raise error.Abort(_(b'repository is not empty'))
232 232
233 233 # determine number of revs in DAG
234 234 total = 0
235 235 for type, data in dagparser.parsedag(text):
236 236 if type == b'n':
237 237 total += 1
238 238
239 239 if mergeable_file:
240 240 linesperrev = 2
241 241 # make a file with k lines per rev
242 242 initialmergedlines = [
243 243 b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
244 244 ]
245 245 initialmergedlines.append(b"")
246 246
247 247 tags = []
248 248 progress = ui.makeprogress(
249 249 _(b'building'), unit=_(b'revisions'), total=total
250 250 )
251 251 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
252 252 at = -1
253 253 atbranch = b'default'
254 254 nodeids = []
255 255 id = 0
256 256 progress.update(id)
257 257 for type, data in dagparser.parsedag(text):
258 258 if type == b'n':
259 259 ui.note((b'node %s\n' % pycompat.bytestr(data)))
260 260 id, ps = data
261 261
262 262 files = []
263 263 filecontent = {}
264 264
265 265 p2 = None
266 266 if mergeable_file:
267 267 fn = b"mf"
268 268 p1 = repo[ps[0]]
269 269 if len(ps) > 1:
270 270 p2 = repo[ps[1]]
271 271 pa = p1.ancestor(p2)
272 272 base, local, other = [
273 273 x[fn].data() for x in (pa, p1, p2)
274 274 ]
275 275 m3 = simplemerge.Merge3Text(base, local, other)
276 276 ml = [l.strip() for l in m3.merge_lines()]
277 277 ml.append(b"")
278 278 elif at > 0:
279 279 ml = p1[fn].data().split(b"\n")
280 280 else:
281 281 ml = initialmergedlines
282 282 ml[id * linesperrev] += b" r%i" % id
283 283 mergedtext = b"\n".join(ml)
284 284 files.append(fn)
285 285 filecontent[fn] = mergedtext
286 286
287 287 if overwritten_file:
288 288 fn = b"of"
289 289 files.append(fn)
290 290 filecontent[fn] = b"r%i\n" % id
291 291
292 292 if new_file:
293 293 fn = b"nf%i" % id
294 294 files.append(fn)
295 295 filecontent[fn] = b"r%i\n" % id
296 296 if len(ps) > 1:
297 297 if not p2:
298 298 p2 = repo[ps[1]]
299 299 for fn in p2:
300 300 if fn.startswith(b"nf"):
301 301 files.append(fn)
302 302 filecontent[fn] = p2[fn].data()
303 303
304 304 def fctxfn(repo, cx, path):
305 305 if path in filecontent:
306 306 return context.memfilectx(
307 307 repo, cx, path, filecontent[path]
308 308 )
309 309 return None
310 310
311 311 if len(ps) == 0 or ps[0] < 0:
312 312 pars = [None, None]
313 313 elif len(ps) == 1:
314 314 pars = [nodeids[ps[0]], None]
315 315 else:
316 316 pars = [nodeids[p] for p in ps]
317 317 cx = context.memctx(
318 318 repo,
319 319 pars,
320 320 b"r%i" % id,
321 321 files,
322 322 fctxfn,
323 323 date=(id, 0),
324 324 user=b"debugbuilddag",
325 325 extra={b'branch': atbranch},
326 326 )
327 327 nodeid = repo.commitctx(cx)
328 328 nodeids.append(nodeid)
329 329 at = id
330 330 elif type == b'l':
331 331 id, name = data
332 332 ui.note((b'tag %s\n' % name))
333 333 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
334 334 elif type == b'a':
335 335 ui.note((b'branch %s\n' % data))
336 336 atbranch = data
337 337 progress.update(id)
338 338
339 339 if tags:
340 340 repo.vfs.write(b"localtags", b"".join(tags))
341 341
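As a worked illustration of the element syntax documented above (tag and branch names are hypothetical), the following DAG text builds a three-node trunk, a two-node named branch, and a merge; `hg debugbuilddag` reads it from the command line or from stdin in an empty repository:

```
+3 :base            # a linear run of three nodes; tag the last one "base"
@feature +2 :ftip   # two more nodes on named branch "feature"; tag the tip
@default <base +1   # back on "default", with "base" as the parent again
/ftip :merged       # merge the preceding node with the "ftip" node
```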
342 342
343 343 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
344 344 indent_string = b' ' * indent
345 345 if all:
346 346 ui.writenoi18n(
347 347 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
348 348 % indent_string
349 349 )
350 350
351 351 def showchunks(named):
352 352 ui.write(b"\n%s%s\n" % (indent_string, named))
353 353 for deltadata in gen.deltaiter():
354 354 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
355 355 ui.write(
356 356 b"%s%s %s %s %s %s %d\n"
357 357 % (
358 358 indent_string,
359 359 hex(node),
360 360 hex(p1),
361 361 hex(p2),
362 362 hex(cs),
363 363 hex(deltabase),
364 364 len(delta),
365 365 )
366 366 )
367 367
368 368 gen.changelogheader()
369 369 showchunks(b"changelog")
370 370 gen.manifestheader()
371 371 showchunks(b"manifest")
372 372 for chunkdata in iter(gen.filelogheader, {}):
373 373 fname = chunkdata[b'filename']
374 374 showchunks(fname)
375 375 else:
376 376 if isinstance(gen, bundle2.unbundle20):
377 377 raise error.Abort(_(b'use debugbundle2 for this file'))
378 378 gen.changelogheader()
379 379 for deltadata in gen.deltaiter():
380 380 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
381 381 ui.write(b"%s%s\n" % (indent_string, hex(node)))
382 382
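For illustration, a minimal standalone sketch (the helper name is hypothetical) of the `readbundle()`/`deltaiter()` sequencing that `_debugchangegroup` uses above:

```python
from mercurial import exchange, hg

def count_changelog_deltas(ui, fname):
    # Hypothetical helper: count changelog deltas in a changegroup bundle,
    # following the same header/deltaiter order as _debugchangegroup.
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.changelogheader()
    count = 0
    for deltadata in gen.deltaiter():
        node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
        count += 1
    return count
```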
383 383
384 384 def _debugobsmarkers(ui, part, indent=0, **opts):
385 385 """display version and markers contained in 'data'"""
386 386 opts = pycompat.byteskwargs(opts)
387 387 data = part.read()
388 388 indent_string = b' ' * indent
389 389 try:
390 390 version, markers = obsolete._readmarkers(data)
391 391 except error.UnknownVersion as exc:
392 392 msg = b"%sunsupported version: %s (%d bytes)\n"
393 393 msg %= indent_string, exc.version, len(data)
394 394 ui.write(msg)
395 395 else:
396 396 msg = b"%sversion: %d (%d bytes)\n"
397 397 msg %= indent_string, version, len(data)
398 398 ui.write(msg)
399 399 fm = ui.formatter(b'debugobsolete', opts)
400 400 for rawmarker in sorted(markers):
401 401 m = obsutil.marker(None, rawmarker)
402 402 fm.startitem()
403 403 fm.plain(indent_string)
404 404 cmdutil.showmarker(fm, m)
405 405 fm.end()
406 406
407 407
408 408 def _debugphaseheads(ui, data, indent=0):
409 409 """display phase heads contained in 'data'"""
410 410 indent_string = b' ' * indent
411 411 headsbyphase = phases.binarydecode(data)
412 412 for phase in phases.allphases:
413 413 for head in headsbyphase[phase]:
414 414 ui.write(indent_string)
415 415 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
416 416
417 417
418 418 def _quasirepr(thing):
419 419 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
420 420 return b'{%s}' % (
421 421 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
422 422 )
423 423 return pycompat.bytestr(repr(thing))
424 424
425 425
426 426 def _debugbundle2(ui, gen, all=None, **opts):
427 427 """lists the contents of a bundle2"""
428 428 if not isinstance(gen, bundle2.unbundle20):
429 429 raise error.Abort(_(b'not a bundle2 file'))
430 430 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
431 431 parttypes = opts.get('part_type', [])
432 432 for part in gen.iterparts():
433 433 if parttypes and part.type not in parttypes:
434 434 continue
435 435 msg = b'%s -- %s (mandatory: %r)\n'
436 436 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
437 437 if part.type == b'changegroup':
438 438 version = part.params.get(b'version', b'01')
439 439 cg = changegroup.getunbundler(version, part, b'UN')
440 440 if not ui.quiet:
441 441 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
442 442 if part.type == b'obsmarkers':
443 443 if not ui.quiet:
444 444 _debugobsmarkers(ui, part, indent=4, **opts)
445 445 if part.type == b'phase-heads':
446 446 if not ui.quiet:
447 447 _debugphaseheads(ui, part, indent=4)
448 448
449 449
450 450 @command(
451 451 b'debugbundle',
452 452 [
453 453 (b'a', b'all', None, _(b'show all details')),
454 454 (b'', b'part-type', [], _(b'show only the named part type')),
455 455 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
456 456 ],
457 457 _(b'FILE'),
458 458 norepo=True,
459 459 )
460 460 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
461 461 """lists the contents of a bundle"""
462 462 with hg.openpath(ui, bundlepath) as f:
463 463 if spec:
464 464 spec = exchange.getbundlespec(ui, f)
465 465 ui.write(b'%s\n' % spec)
466 466 return
467 467
468 468 gen = exchange.readbundle(ui, f, bundlepath)
469 469 if isinstance(gen, bundle2.unbundle20):
470 470 return _debugbundle2(ui, gen, all=all, **opts)
471 471 _debugchangegroup(ui, gen, all=all, **opts)
472 472
473 473
474 474 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
475 475 def debugcapabilities(ui, path, **opts):
476 476 """lists the capabilities of a remote peer"""
477 477 opts = pycompat.byteskwargs(opts)
478 478 peer = hg.peer(ui, opts, path)
479 479 try:
480 480 caps = peer.capabilities()
481 481 ui.writenoi18n(b'Main capabilities:\n')
482 482 for c in sorted(caps):
483 483 ui.write(b' %s\n' % c)
484 484 b2caps = bundle2.bundle2caps(peer)
485 485 if b2caps:
486 486 ui.writenoi18n(b'Bundle2 capabilities:\n')
487 487 for key, values in sorted(pycompat.iteritems(b2caps)):
488 488 ui.write(b' %s\n' % key)
489 489 for v in values:
490 490 ui.write(b' %s\n' % v)
491 491 finally:
492 492 peer.close()
493 493
494 494
495 495 @command(
496 496 b'debugchangedfiles',
497 497 [
498 498 (
499 499 b'',
500 500 b'compute',
501 501 False,
502 502 b"compute information instead of reading it from storage",
503 503 ),
504 504 ],
505 505 b'REV',
506 506 )
507 507 def debugchangedfiles(ui, repo, rev, **opts):
508 508 """list the stored files changes for a revision"""
509 509 ctx = scmutil.revsingle(repo, rev, None)
510 510 files = None
511 511
512 512 if opts['compute']:
513 513 files = metadata.compute_all_files_changes(ctx)
514 514 else:
515 515 sd = repo.changelog.sidedata(ctx.rev())
516 516 files_block = sd.get(sidedata.SD_FILES)
517 517 if files_block is not None:
518 518 files = metadata.decode_files_sidedata(sd)
519 519 if files is not None:
520 520 for f in sorted(files.touched):
521 521 if f in files.added:
522 522 action = b"added"
523 523 elif f in files.removed:
524 524 action = b"removed"
525 525 elif f in files.merged:
526 526 action = b"merged"
527 527 elif f in files.salvaged:
528 528 action = b"salvaged"
529 529 else:
530 530 action = b"touched"
531 531
532 532 copy_parent = b""
533 533 copy_source = b""
534 534 if f in files.copied_from_p1:
535 535 copy_parent = b"p1"
536 536 copy_source = files.copied_from_p1[f]
537 537 elif f in files.copied_from_p2:
538 538 copy_parent = b"p2"
539 539 copy_source = files.copied_from_p2[f]
540 540
541 541 data = (action, copy_parent, f, copy_source)
542 542 template = b"%-8s %2s: %s, %s;\n"
543 543 ui.write(template % data)
544 544
545 545
546 546 @command(b'debugcheckstate', [], b'')
547 547 def debugcheckstate(ui, repo):
548 548 """validate the correctness of the current dirstate"""
549 549 parent1, parent2 = repo.dirstate.parents()
550 550 m1 = repo[parent1].manifest()
551 551 m2 = repo[parent2].manifest()
552 552 errors = 0
553 553 for f in repo.dirstate:
554 554 state = repo.dirstate[f]
555 555 if state in b"nr" and f not in m1:
556 556 ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
557 557 errors += 1
558 558 if state in b"a" and f in m1:
559 559 ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
560 560 errors += 1
561 561 if state in b"m" and f not in m1 and f not in m2:
562 562 ui.warn(
563 563 _(b"%s in state %s, but not in either manifest\n") % (f, state)
564 564 )
565 565 errors += 1
566 566 for f in m1:
567 567 state = repo.dirstate[f]
568 568 if state not in b"nrm":
569 569 ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
570 570 errors += 1
571 571 if errors:
572 572 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
573 573 raise error.Abort(errstr)
574 574
575 575
576 576 @command(
577 577 b'debugcolor',
578 578 [(b'', b'style', None, _(b'show all configured styles'))],
579 579 b'hg debugcolor',
580 580 )
581 581 def debugcolor(ui, repo, **opts):
582 582 """show available colors, effects or styles"""
583 583 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
584 584 if opts.get('style'):
585 585 return _debugdisplaystyle(ui)
586 586 else:
587 587 return _debugdisplaycolor(ui)
588 588
589 589
590 590 def _debugdisplaycolor(ui):
591 591 ui = ui.copy()
592 592 ui._styles.clear()
593 593 for effect in color._activeeffects(ui).keys():
594 594 ui._styles[effect] = effect
595 595 if ui._terminfoparams:
596 596 for k, v in ui.configitems(b'color'):
597 597 if k.startswith(b'color.'):
598 598 ui._styles[k] = k[6:]
599 599 elif k.startswith(b'terminfo.'):
600 600 ui._styles[k] = k[9:]
601 601 ui.write(_(b'available colors:\n'))
602 602 # sort label with a '_' after the other to group '_background' entry.
603 603 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
604 604 for colorname, label in items:
605 605 ui.write(b'%s\n' % colorname, label=label)
606 606
607 607
608 608 def _debugdisplaystyle(ui):
609 609 ui.write(_(b'available style:\n'))
610 610 if not ui._styles:
611 611 return
612 612 width = max(len(s) for s in ui._styles)
613 613 for label, effects in sorted(ui._styles.items()):
614 614 ui.write(b'%s' % label, label=label)
615 615 if effects:
616 616 # 50
617 617 ui.write(b': ')
618 618 ui.write(b' ' * (max(0, width - len(label))))
619 619 ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
620 620 ui.write(b'\n')
621 621
622 622
623 623 @command(b'debugcreatestreamclonebundle', [], b'FILE')
624 624 def debugcreatestreamclonebundle(ui, repo, fname):
625 625 """create a stream clone bundle file
626 626
627 627 Stream bundles are special bundles that are essentially archives of
628 628 revlog files. They are commonly used for cloning very quickly.
629 629 """
630 630 # TODO we may want to turn this into an abort when this functionality
631 631 # is moved into `hg bundle`.
632 632 if phases.hassecret(repo):
633 633 ui.warn(
634 634 _(
635 635 b'(warning: stream clone bundle will contain secret '
636 636 b'revisions)\n'
637 637 )
638 638 )
639 639
640 640 requirements, gen = streamclone.generatebundlev1(repo)
641 641 changegroup.writechunks(ui, gen, fname)
642 642
643 643 ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
644 644
645 645
646 646 @command(
647 647 b'debugdag',
648 648 [
649 649 (b't', b'tags', None, _(b'use tags as labels')),
650 650 (b'b', b'branches', None, _(b'annotate with branch names')),
651 651 (b'', b'dots', None, _(b'use dots for runs')),
652 652 (b's', b'spaces', None, _(b'separate elements by spaces')),
653 653 ],
654 654 _(b'[OPTION]... [FILE [REV]...]'),
655 655 optionalrepo=True,
656 656 )
657 657 def debugdag(ui, repo, file_=None, *revs, **opts):
658 658 """format the changelog or an index DAG as a concise textual description
659 659
660 660 If you pass a revlog index, the revlog's DAG is emitted. If you list
661 661 revision numbers, they get labeled in the output as rN.
662 662
663 663 Otherwise, the changelog DAG of the current repo is emitted.
664 664 """
665 665 spaces = opts.get('spaces')
666 666 dots = opts.get('dots')
667 667 if file_:
668 668 rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
669 669 revs = {int(r) for r in revs}
670 670
671 671 def events():
672 672 for r in rlog:
673 673 yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
674 674 if r in revs:
675 675 yield b'l', (r, b"r%i" % r)
676 676
677 677 elif repo:
678 678 cl = repo.changelog
679 679 tags = opts.get('tags')
680 680 branches = opts.get('branches')
681 681 if tags:
682 682 labels = {}
683 683 for l, n in repo.tags().items():
684 684 labels.setdefault(cl.rev(n), []).append(l)
685 685
686 686 def events():
687 687 b = b"default"
688 688 for r in cl:
689 689 if branches:
690 690 newb = cl.read(cl.node(r))[5][b'branch']
691 691 if newb != b:
692 692 yield b'a', newb
693 693 b = newb
694 694 yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
695 695 if tags:
696 696 ls = labels.get(r)
697 697 if ls:
698 698 for l in ls:
699 699 yield b'l', (r, l)
700 700
701 701 else:
702 702 raise error.Abort(_(b'need repo for changelog dag'))
703 703
704 704 for line in dagparser.dagtextlines(
705 705 events(),
706 706 addspaces=spaces,
707 707 wraplabels=True,
708 708 wrapannotations=True,
709 709 wrapnonlinear=dots,
710 710 usedots=dots,
711 711 maxlinewidth=70,
712 712 ):
713 713 ui.write(line)
714 714 ui.write(b"\n")
715 715
716 716
717 717 @command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
718 718 def debugdata(ui, repo, file_, rev=None, **opts):
719 719 """dump the contents of a data file revision"""
720 720 opts = pycompat.byteskwargs(opts)
721 721 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
722 722 if rev is not None:
723 723 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
724 724 file_, rev = None, file_
725 725 elif rev is None:
726 726 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
727 727 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
728 728 try:
729 729 ui.write(r.rawdata(r.lookup(rev)))
730 730 except KeyError:
731 731 raise error.Abort(_(b'invalid revision identifier %s') % rev)
732 732
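Per the `-c|-m|FILE REV` synopsis, typical invocations look like this (paths and revisions are placeholders):

```
$ hg debugdata -c 0                 # raw data of changelog revision 0
$ hg debugdata -m 0                 # raw data of manifest revision 0
$ hg debugdata some/file.txt 0      # raw data of a filelog revision
```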
733 733
734 734 @command(
735 735 b'debugdate',
736 736 [(b'e', b'extended', None, _(b'try extended date formats'))],
737 737 _(b'[-e] DATE [RANGE]'),
738 738 norepo=True,
739 739 optionalrepo=True,
740 740 )
741 741 def debugdate(ui, date, range=None, **opts):
742 742 """parse and display a date"""
743 743 if opts["extended"]:
744 744 d = dateutil.parsedate(date, dateutil.extendeddateformats)
745 745 else:
746 746 d = dateutil.parsedate(date)
747 747 ui.writenoi18n(b"internal: %d %d\n" % d)
748 748 ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
749 749 if range:
750 750 m = dateutil.matchdate(range)
751 751 ui.writenoi18n(b"match: %s\n" % m(d[0]))
752 752
753 753
754 754 @command(
755 755 b'debugdeltachain',
756 756 cmdutil.debugrevlogopts + cmdutil.formatteropts,
757 757 _(b'-c|-m|FILE'),
758 758 optionalrepo=True,
759 759 )
760 760 def debugdeltachain(ui, repo, file_=None, **opts):
761 761 """dump information about delta chains in a revlog
762 762
763 763 Output can be templatized. Available template keywords are:
764 764
765 765 :``rev``: revision number
766 766 :``chainid``: delta chain identifier (numbered by unique base)
767 767 :``chainlen``: delta chain length to this revision
768 768 :``prevrev``: previous revision in delta chain
769 769 :``deltatype``: role of delta / how it was computed
770 770 :``compsize``: compressed size of revision
771 771 :``uncompsize``: uncompressed size of revision
772 772 :``chainsize``: total size of compressed revisions in chain
773 773 :``chainratio``: total chain size divided by uncompressed revision size
774 774 (new delta chains typically start at ratio 2.00)
775 775 :``lindist``: linear distance from base revision in delta chain to end
776 776 of this revision
777 777 :``extradist``: total size of revisions not part of this delta chain from
778 778 base of delta chain to end of this revision; a measurement
779 779 of how much extra data we need to read/seek across to read
780 780 the delta chain for this revision
781 781 :``extraratio``: extradist divided by chainsize; another representation of
782 782 how much unrelated data is needed to load this delta chain
783 783
784 784 If the repository is configured to use the sparse read, additional keywords
785 785 are available:
786 786
787 787 :``readsize``: total size of data read from the disk for a revision
788 788 (sum of the sizes of all the blocks)
789 789 :``largestblock``: size of the largest block of data read from the disk
790 790 :``readdensity``: density of useful bytes in the data read from the disk
791 791 :``srchunks``: in how many data hunks the whole revision would be read
792 792
793 793 The sparse read can be enabled with experimental.sparse-read = True
794 794 """
795 795 opts = pycompat.byteskwargs(opts)
796 796 r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
797 797 index = r.index
798 798 start = r.start
799 799 length = r.length
800 800 generaldelta = r._generaldelta
801 801 withsparseread = getattr(r, '_withsparseread', False)
802 802
803 803 def revinfo(rev):
804 804 e = index[rev]
805 805 compsize = e[1]
806 806 uncompsize = e[2]
807 807 chainsize = 0
808 808
809 809 if generaldelta:
810 810 if e[3] == e[5]:
811 811 deltatype = b'p1'
812 812 elif e[3] == e[6]:
813 813 deltatype = b'p2'
814 814 elif e[3] == rev - 1:
815 815 deltatype = b'prev'
816 816 elif e[3] == rev:
817 817 deltatype = b'base'
818 818 else:
819 819 deltatype = b'other'
820 820 else:
821 821 if e[3] == rev:
822 822 deltatype = b'base'
823 823 else:
824 824 deltatype = b'prev'
825 825
826 826 chain = r._deltachain(rev)[0]
827 827 for iterrev in chain:
828 828 e = index[iterrev]
829 829 chainsize += e[1]
830 830
831 831 return compsize, uncompsize, deltatype, chain, chainsize
832 832
833 833 fm = ui.formatter(b'debugdeltachain', opts)
834 834
835 835 fm.plain(
836 836 b' rev chain# chainlen prev delta '
837 837 b'size rawsize chainsize ratio lindist extradist '
838 838 b'extraratio'
839 839 )
840 840 if withsparseread:
841 841 fm.plain(b' readsize largestblk rddensity srchunks')
842 842 fm.plain(b'\n')
843 843
844 844 chainbases = {}
845 845 for rev in r:
846 846 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
847 847 chainbase = chain[0]
848 848 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
849 849 basestart = start(chainbase)
850 850 revstart = start(rev)
851 851 lineardist = revstart + comp - basestart
852 852 extradist = lineardist - chainsize
853 853 try:
854 854 prevrev = chain[-2]
855 855 except IndexError:
856 856 prevrev = -1
857 857
858 858 if uncomp != 0:
859 859 chainratio = float(chainsize) / float(uncomp)
860 860 else:
861 861 chainratio = chainsize
862 862
863 863 if chainsize != 0:
864 864 extraratio = float(extradist) / float(chainsize)
865 865 else:
866 866 extraratio = extradist
867 867
868 868 fm.startitem()
869 869 fm.write(
870 870 b'rev chainid chainlen prevrev deltatype compsize '
871 871 b'uncompsize chainsize chainratio lindist extradist '
872 872 b'extraratio',
873 873 b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
874 874 rev,
875 875 chainid,
876 876 len(chain),
877 877 prevrev,
878 878 deltatype,
879 879 comp,
880 880 uncomp,
881 881 chainsize,
882 882 chainratio,
883 883 lineardist,
884 884 extradist,
885 885 extraratio,
886 886 rev=rev,
887 887 chainid=chainid,
888 888 chainlen=len(chain),
889 889 prevrev=prevrev,
890 890 deltatype=deltatype,
891 891 compsize=comp,
892 892 uncompsize=uncomp,
893 893 chainsize=chainsize,
894 894 chainratio=chainratio,
895 895 lindist=lineardist,
896 896 extradist=extradist,
897 897 extraratio=extraratio,
898 898 )
899 899 if withsparseread:
900 900 readsize = 0
901 901 largestblock = 0
902 902 srchunks = 0
903 903
904 904 for revschunk in deltautil.slicechunk(r, chain):
905 905 srchunks += 1
906 906 blkend = start(revschunk[-1]) + length(revschunk[-1])
907 907 blksize = blkend - start(revschunk[0])
908 908
909 909 readsize += blksize
910 910 if largestblock < blksize:
911 911 largestblock = blksize
912 912
913 913 if readsize:
914 914 readdensity = float(chainsize) / float(readsize)
915 915 else:
916 916 readdensity = 1
917 917
918 918 fm.write(
919 919 b'readsize largestblock readdensity srchunks',
920 920 b' %10d %10d %9.5f %8d',
921 921 readsize,
922 922 largestblock,
923 923 readdensity,
924 924 srchunks,
925 925 readsize=readsize,
926 926 largestblock=largestblock,
927 927 readdensity=readdensity,
928 928 srchunks=srchunks,
929 929 )
930 930
931 931 fm.plain(b'\n')
932 932
933 933 fm.end()
934 934
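As a usage sketch, the template keywords documented above combine with the standard `-T` formatter option (the field selection is illustrative):

```
$ hg debugdeltachain -m -T '{rev} {chainid} {chainlen} {chainratio}\n'
```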
935 935
936 936 @command(
937 937 b'debugdirstate|debugstate',
938 938 [
939 939 (
940 940 b'',
941 941 b'nodates',
942 942 None,
943 943 _(b'do not display the saved mtime (DEPRECATED)'),
944 944 ),
945 945 (b'', b'dates', True, _(b'display the saved mtime')),
946 946 (b'', b'datesort', None, _(b'sort by saved mtime')),
947 947 (
948 948 b'',
949 949 b'all',
950 950 False,
951 951 _(b'display dirstate-v2 tree nodes that would not exist in v1'),
952 952 ),
953 953 ],
954 954 _(b'[OPTION]...'),
955 955 )
956 956 def debugstate(ui, repo, **opts):
957 957 """show the contents of the current dirstate"""
958 958
959 959 nodates = not opts['dates']
960 960 if opts.get('nodates') is not None:
961 961 nodates = True
962 962 datesort = opts.get('datesort')
963 963
964 964 if datesort:
965 keyfunc = lambda x: (
966 x[1].v1_mtime(),
967 x[0],
968 ) # sort by mtime, then by filename
965
966 def keyfunc(entry):
967 filename, _state, _mode, _size, mtime = entry
968 return (mtime, filename)
969
969 970 else:
970 971 keyfunc = None # sort by filename
971 972 entries = list(repo.dirstate._map.debug_iter(all=opts['all']))
972 973 entries.sort(key=keyfunc)
973 for file_, ent in entries:
974 if ent.v1_mtime() == -1:
974 for entry in entries:
975 filename, state, mode, size, mtime = entry
976 if mtime == -1:
975 977 timestr = b'unset '
976 978 elif nodates:
977 979 timestr = b'set '
978 980 else:
979 timestr = time.strftime(
980 "%Y-%m-%d %H:%M:%S ", time.localtime(ent.v1_mtime())
981 )
981 timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime))
982 982 timestr = encoding.strtolocal(timestr)
983 if ent.mode & 0o20000:
983 if mode & 0o20000:
984 984 mode = b'lnk'
985 985 else:
986 mode = b'%3o' % (ent.v1_mode() & 0o777 & ~util.umask)
987 ui.write(
988 b"%c %s %10d %s%s\n"
989 % (ent.v1_state(), mode, ent.v1_size(), timestr, file_)
990 )
986 mode = b'%3o' % (mode & 0o777 & ~util.umask)
987 ui.write(b"%c %s %10d %s%s\n" % (state, mode, size, timestr, filename))
991 988 for f in repo.dirstate.copies():
992 989 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
993 990
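For illustration, a minimal sketch of consuming the tuple-based `debug_iter()` this change introduces (the surrounding script and the filter are hypothetical); entries unpack exactly as in `debugstate` above:

```python
# Hypothetical: list entries whose mtime is unset (-1), mirroring the
# unpacking and the "unset" convention used by debugstate above.
for entry in repo.dirstate._map.debug_iter(all=False):
    filename, state, mode, size, mtime = entry
    if mtime == -1:
        ui.write(b"%c %s\n" % (state, filename))
```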
994 991
995 992 @command(
996 993 b'debugdirstateignorepatternshash',
997 994 [],
998 995 _(b''),
999 996 )
1000 997 def debugdirstateignorepatternshash(ui, repo, **opts):
1001 998 """show the hash of ignore patterns stored in dirstate if v2,
1002 999 or nothing for dirstate-v2
1003 1000 """
1004 1001 if repo.dirstate._use_dirstate_v2:
1005 1002 docket = repo.dirstate._map.docket
1006 1003 hash_len = 20 # 160 bits for SHA-1
1007 1004 hash_bytes = docket.tree_metadata[-hash_len:]
1008 1005 ui.write(binascii.hexlify(hash_bytes) + b'\n')
1009 1006
1010 1007
1011 1008 @command(
1012 1009 b'debugdiscovery',
1013 1010 [
1014 1011 (b'', b'old', None, _(b'use old-style discovery')),
1015 1012 (
1016 1013 b'',
1017 1014 b'nonheads',
1018 1015 None,
1019 1016 _(b'use old-style discovery with non-heads included'),
1020 1017 ),
1021 1018 (b'', b'rev', [], b'restrict discovery to this set of revs'),
1022 1019 (b'', b'seed', b'12323', b'specify the random seed used for discovery'),
1023 1020 (
1024 1021 b'',
1025 1022 b'local-as-revs',
1026 1023 b"",
1027 1024 b'treat local as having these revisions only',
1028 1025 ),
1029 1026 (
1030 1027 b'',
1031 1028 b'remote-as-revs',
1032 1029 b"",
1033 1030 b'use local as remote, with only these revisions',
1034 1031 ),
1035 1032 ]
1036 1033 + cmdutil.remoteopts
1037 1034 + cmdutil.formatteropts,
1038 1035 _(b'[--rev REV] [OTHER]'),
1039 1036 )
1040 1037 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
1041 1038 """runs the changeset discovery protocol in isolation
1042 1039
1043 1040 The local peer can be "replaced" by a subset of the local repository by
1044 1041 using the `--local-as-revs` flag. In the same way, the usual `remote` peer can
1045 1042 be "replaced" by a subset of the local repository using the
1046 1043 `--remote-as-revs` flag. This is useful to efficiently debug pathological
1047 1044 discovery situations.
1048 1045
1049 1046 The following developer-oriented config options are relevant for people playing with this command:
1050 1047
1051 1048 * devel.discovery.exchange-heads=True
1052 1049
1053 1050 If False, the discovery will not start with
1054 1051 remote head fetching and local head querying.
1055 1052
1056 1053 * devel.discovery.grow-sample=True
1057 1054
1058 1055 If False, the sample size used in set discovery will not be increased
1059 1056 through the process
1060 1057
1061 1058 * devel.discovery.grow-sample.dynamic=True
1062 1059
1063 1060 When discovery.grow-sample.dynamic is True, the default, the sample size is
1064 1061 adapted to the shape of the undecided set (it is set to the max of:
1065 1062 <target-size>, len(roots(undecided)), len(heads(undecided)))
1066 1063
1067 1064 * devel.discovery.grow-sample.rate=1.05
1068 1065
1069 1066 the rate at which the sample grows
1070 1067
1071 1068 * devel.discovery.randomize=True
1072 1069
1073 1070 If False, random sampling during discovery is replaced by a deterministic
1074 1071 one. It is meant for integration tests.
1075 1072
1076 1073 * devel.discovery.sample-size=200
1077 1074
1078 1075 Control the initial size of the discovery sample
1079 1076
1080 1077 * devel.discovery.sample-size.initial=100
1081 1078
1082 1079 Control the sample size used for the first round of discovery
1083 1080 """
1084 1081 opts = pycompat.byteskwargs(opts)
1085 1082 unfi = repo.unfiltered()
1086 1083
1087 1084 # setup potential extra filtering
1088 1085 local_revs = opts[b"local_as_revs"]
1089 1086 remote_revs = opts[b"remote_as_revs"]
1090 1087
1091 1088 # make sure tests are repeatable
1092 1089 random.seed(int(opts[b'seed']))
1093 1090
1094 1091 if not remote_revs:
1095 1092
1096 1093 remoteurl, branches = urlutil.get_unique_pull_path(
1097 1094 b'debugdiscovery', repo, ui, remoteurl
1098 1095 )
1099 1096 remote = hg.peer(repo, opts, remoteurl)
1100 1097 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
1101 1098 else:
1102 1099 branches = (None, [])
1103 1100 remote_filtered_revs = scmutil.revrange(
1104 1101 unfi, [b"not (::(%s))" % remote_revs]
1105 1102 )
1106 1103 remote_filtered_revs = frozenset(remote_filtered_revs)
1107 1104
1108 1105 def remote_func(x):
1109 1106 return remote_filtered_revs
1110 1107
1111 1108 repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func
1112 1109
1113 1110 remote = repo.peer()
1114 1111 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1115 1112
1116 1113 if local_revs:
1117 1114 local_filtered_revs = scmutil.revrange(
1118 1115 unfi, [b"not (::(%s))" % local_revs]
1119 1116 )
1120 1117 local_filtered_revs = frozenset(local_filtered_revs)
1121 1118
1122 1119 def local_func(x):
1123 1120 return local_filtered_revs
1124 1121
1125 1122 repoview.filtertable[b'debug-discovery-local-filter'] = local_func
1126 1123 repo = repo.filtered(b'debug-discovery-local-filter')
1127 1124
1128 1125 data = {}
1129 1126 if opts.get(b'old'):
1130 1127
1131 1128 def doit(pushedrevs, remoteheads, remote=remote):
1132 1129 if not util.safehasattr(remote, b'branches'):
1133 1130 # enable in-client legacy support
1134 1131 remote = localrepo.locallegacypeer(remote.local())
1135 1132 common, _in, hds = treediscovery.findcommonincoming(
1136 1133 repo, remote, force=True, audit=data
1137 1134 )
1138 1135 common = set(common)
1139 1136 if not opts.get(b'nonheads'):
1140 1137 ui.writenoi18n(
1141 1138 b"unpruned common: %s\n"
1142 1139 % b" ".join(sorted(short(n) for n in common))
1143 1140 )
1144 1141
1145 1142 clnode = repo.changelog.node
1146 1143 common = repo.revs(b'heads(::%ln)', common)
1147 1144 common = {clnode(r) for r in common}
1148 1145 return common, hds
1149 1146
1150 1147 else:
1151 1148
1152 1149 def doit(pushedrevs, remoteheads, remote=remote):
1153 1150 nodes = None
1154 1151 if pushedrevs:
1155 1152 revs = scmutil.revrange(repo, pushedrevs)
1156 1153 nodes = [repo[r].node() for r in revs]
1157 1154 common, any, hds = setdiscovery.findcommonheads(
1158 1155 ui, repo, remote, ancestorsof=nodes, audit=data
1159 1156 )
1160 1157 return common, hds
1161 1158
1162 1159 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
1163 1160 localrevs = opts[b'rev']
1164 1161
1165 1162 fm = ui.formatter(b'debugdiscovery', opts)
1166 1163 if fm.strict_format:
1167 1164
1168 1165 @contextlib.contextmanager
1169 1166 def may_capture_output():
1170 1167 ui.pushbuffer()
1171 1168 yield
1172 1169 data[b'output'] = ui.popbuffer()
1173 1170
1174 1171 else:
1175 1172 may_capture_output = util.nullcontextmanager
1176 1173 with may_capture_output():
1177 1174 with util.timedcm('debug-discovery') as t:
1178 1175 common, hds = doit(localrevs, remoterevs)
1179 1176
1180 1177 # compute all statistics
1181 1178 heads_common = set(common)
1182 1179 heads_remote = set(hds)
1183 1180 heads_local = set(repo.heads())
1184 1181 # note: they cannot be a local or remote head that is in common and not
1185 1182 # itself a head of common.
1186 1183 heads_common_local = heads_common & heads_local
1187 1184 heads_common_remote = heads_common & heads_remote
1188 1185 heads_common_both = heads_common & heads_remote & heads_local
1189 1186
1190 1187 all = repo.revs(b'all()')
1191 1188 common = repo.revs(b'::%ln', common)
1192 1189 roots_common = repo.revs(b'roots(::%ld)', common)
1193 1190 missing = repo.revs(b'not ::%ld', common)
1194 1191 heads_missing = repo.revs(b'heads(%ld)', missing)
1195 1192 roots_missing = repo.revs(b'roots(%ld)', missing)
1196 1193 assert len(common) + len(missing) == len(all)
1197 1194
1198 1195 initial_undecided = repo.revs(
1199 1196 b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
1200 1197 )
1201 1198 heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
1202 1199 roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
1203 1200 common_initial_undecided = initial_undecided & common
1204 1201 missing_initial_undecided = initial_undecided & missing
1205 1202
1206 1203 data[b'elapsed'] = t.elapsed
1207 1204 data[b'nb-common-heads'] = len(heads_common)
1208 1205 data[b'nb-common-heads-local'] = len(heads_common_local)
1209 1206 data[b'nb-common-heads-remote'] = len(heads_common_remote)
1210 1207 data[b'nb-common-heads-both'] = len(heads_common_both)
1211 1208 data[b'nb-common-roots'] = len(roots_common)
1212 1209 data[b'nb-head-local'] = len(heads_local)
1213 1210 data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
1214 1211 data[b'nb-head-remote'] = len(heads_remote)
1215 1212 data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
1216 1213 heads_common_remote
1217 1214 )
1218 1215 data[b'nb-revs'] = len(all)
1219 1216 data[b'nb-revs-common'] = len(common)
1220 1217 data[b'nb-revs-missing'] = len(missing)
1221 1218 data[b'nb-missing-heads'] = len(heads_missing)
1222 1219 data[b'nb-missing-roots'] = len(roots_missing)
1223 1220 data[b'nb-ini_und'] = len(initial_undecided)
1224 1221 data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
1225 1222 data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
1226 1223 data[b'nb-ini_und-common'] = len(common_initial_undecided)
1227 1224 data[b'nb-ini_und-missing'] = len(missing_initial_undecided)
1228 1225
1229 1226 fm.startitem()
1230 1227 fm.data(**pycompat.strkwargs(data))
1231 1228 # display discovery summary
1232 1229 fm.plain(b"elapsed time: %(elapsed)f seconds\n" % data)
1233 1230 fm.plain(b"round-trips: %(total-roundtrips)9d\n" % data)
1234 1231 fm.plain(b"heads summary:\n")
1235 1232 fm.plain(b" total common heads: %(nb-common-heads)9d\n" % data)
1236 1233 fm.plain(b" also local heads: %(nb-common-heads-local)9d\n" % data)
1237 1234 fm.plain(b" also remote heads: %(nb-common-heads-remote)9d\n" % data)
1238 1235 fm.plain(b" both: %(nb-common-heads-both)9d\n" % data)
1239 1236 fm.plain(b" local heads: %(nb-head-local)9d\n" % data)
1240 1237 fm.plain(b" common: %(nb-common-heads-local)9d\n" % data)
1241 1238 fm.plain(b" missing: %(nb-head-local-missing)9d\n" % data)
1242 1239 fm.plain(b" remote heads: %(nb-head-remote)9d\n" % data)
1243 1240 fm.plain(b" common: %(nb-common-heads-remote)9d\n" % data)
1244 1241 fm.plain(b" unknown: %(nb-head-remote-unknown)9d\n" % data)
1245 1242 fm.plain(b"local changesets: %(nb-revs)9d\n" % data)
1246 1243 fm.plain(b" common: %(nb-revs-common)9d\n" % data)
1247 1244 fm.plain(b" heads: %(nb-common-heads)9d\n" % data)
1248 1245 fm.plain(b" roots: %(nb-common-roots)9d\n" % data)
1249 1246 fm.plain(b" missing: %(nb-revs-missing)9d\n" % data)
1250 1247 fm.plain(b" heads: %(nb-missing-heads)9d\n" % data)
1251 1248 fm.plain(b" roots: %(nb-missing-roots)9d\n" % data)
1252 1249 fm.plain(b" first undecided set: %(nb-ini_und)9d\n" % data)
1253 1250 fm.plain(b" heads: %(nb-ini_und-heads)9d\n" % data)
1254 1251 fm.plain(b" roots: %(nb-ini_und-roots)9d\n" % data)
1255 1252 fm.plain(b" common: %(nb-ini_und-common)9d\n" % data)
1256 1253 fm.plain(b" missing: %(nb-ini_und-missing)9d\n" % data)
1257 1254
1258 1255 if ui.verbose:
1259 1256 fm.plain(
1260 1257 b"common heads: %s\n"
1261 1258 % b" ".join(sorted(short(n) for n in heads_common))
1262 1259 )
1263 1260 fm.end()
1264 1261
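As a configuration sketch (values are illustrative), the developer knobs documented in the docstring can be pinned in an hgrc for reproducible discovery experiments:

```
[devel]
discovery.exchange-heads = True
discovery.grow-sample = False
discovery.randomize = False
discovery.sample-size = 50
```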
1265 1262
1266 1263 _chunksize = 4 << 10
1267 1264
1268 1265
1269 1266 @command(
1270 1267 b'debugdownload',
1271 1268 [
1272 1269 (b'o', b'output', b'', _(b'path')),
1273 1270 ],
1274 1271 optionalrepo=True,
1275 1272 )
1276 1273 def debugdownload(ui, repo, url, output=None, **opts):
1277 1274 """download a resource using Mercurial logic and config"""
1278 1275 fh = urlmod.open(ui, url, output)
1279 1276
1280 1277 dest = ui
1281 1278 if output:
1282 1279 dest = open(output, b"wb", _chunksize)
1283 1280 try:
1284 1281 data = fh.read(_chunksize)
1285 1282 while data:
1286 1283 dest.write(data)
1287 1284 data = fh.read(_chunksize)
1288 1285 finally:
1289 1286 if output:
1290 1287 dest.close()
1291 1288
1292 1289
1293 1290 @command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
1294 1291 def debugextensions(ui, repo, **opts):
1295 1292 '''show information about active extensions'''
1296 1293 opts = pycompat.byteskwargs(opts)
1297 1294 exts = extensions.extensions(ui)
1298 1295 hgver = util.version()
1299 1296 fm = ui.formatter(b'debugextensions', opts)
1300 1297 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
1301 1298 isinternal = extensions.ismoduleinternal(extmod)
1302 1299 extsource = None
1303 1300
1304 1301 if util.safehasattr(extmod, '__file__'):
1305 1302 extsource = pycompat.fsencode(extmod.__file__)
1306 1303 elif getattr(sys, 'oxidized', False):
1307 1304 extsource = pycompat.sysexecutable
1308 1305 if isinternal:
1309 1306 exttestedwith = [] # never expose magic string to users
1310 1307 else:
1311 1308 exttestedwith = getattr(extmod, 'testedwith', b'').split()
1312 1309 extbuglink = getattr(extmod, 'buglink', None)
1313 1310
1314 1311 fm.startitem()
1315 1312
1316 1313 if ui.quiet or ui.verbose:
1317 1314 fm.write(b'name', b'%s\n', extname)
1318 1315 else:
1319 1316 fm.write(b'name', b'%s', extname)
1320 1317 if isinternal or hgver in exttestedwith:
1321 1318 fm.plain(b'\n')
1322 1319 elif not exttestedwith:
1323 1320 fm.plain(_(b' (untested!)\n'))
1324 1321 else:
1325 1322 lasttestedversion = exttestedwith[-1]
1326 1323 fm.plain(b' (%s!)\n' % lasttestedversion)
1327 1324
1328 1325 fm.condwrite(
1329 1326 ui.verbose and extsource,
1330 1327 b'source',
1331 1328 _(b' location: %s\n'),
1332 1329 extsource or b"",
1333 1330 )
1334 1331
1335 1332 if ui.verbose:
1336 1333 fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
1337 1334 fm.data(bundled=isinternal)
1338 1335
1339 1336 fm.condwrite(
1340 1337 ui.verbose and exttestedwith,
1341 1338 b'testedwith',
1342 1339 _(b' tested with: %s\n'),
1343 1340 fm.formatlist(exttestedwith, name=b'ver'),
1344 1341 )
1345 1342
1346 1343 fm.condwrite(
1347 1344 ui.verbose and extbuglink,
1348 1345 b'buglink',
1349 1346 _(b' bug reporting: %s\n'),
1350 1347 extbuglink or b"",
1351 1348 )
1352 1349
1353 1350 fm.end()
1354 1351
1355 1352
1356 1353 @command(
1357 1354 b'debugfileset',
1358 1355 [
1359 1356 (
1360 1357 b'r',
1361 1358 b'rev',
1362 1359 b'',
1363 1360 _(b'apply the filespec on this revision'),
1364 1361 _(b'REV'),
1365 1362 ),
1366 1363 (
1367 1364 b'',
1368 1365 b'all-files',
1369 1366 False,
1370 1367 _(b'test files from all revisions and working directory'),
1371 1368 ),
1372 1369 (
1373 1370 b's',
1374 1371 b'show-matcher',
1375 1372 None,
1376 1373 _(b'print internal representation of matcher'),
1377 1374 ),
1378 1375 (
1379 1376 b'p',
1380 1377 b'show-stage',
1381 1378 [],
1382 1379 _(b'print parsed tree at the given stage'),
1383 1380 _(b'NAME'),
1384 1381 ),
1385 1382 ],
1386 1383 _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
1387 1384 )
1388 1385 def debugfileset(ui, repo, expr, **opts):
1389 1386 '''parse and apply a fileset specification'''
1390 1387 from . import fileset
1391 1388
1392 1389 fileset.symbols # force import of fileset so we have predicates to optimize
1393 1390 opts = pycompat.byteskwargs(opts)
1394 1391 ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
1395 1392
1396 1393 stages = [
1397 1394 (b'parsed', pycompat.identity),
1398 1395 (b'analyzed', filesetlang.analyze),
1399 1396 (b'optimized', filesetlang.optimize),
1400 1397 ]
1401 1398 stagenames = {n for n, f in stages}
1402 1399
1403 1400 showalways = set()
1404 1401 if ui.verbose and not opts[b'show_stage']:
1405 1402 # show parsed tree by --verbose (deprecated)
1406 1403 showalways.add(b'parsed')
1407 1404 if opts[b'show_stage'] == [b'all']:
1408 1405 showalways.update(stagenames)
1409 1406 else:
1410 1407 for n in opts[b'show_stage']:
1411 1408 if n not in stagenames:
1412 1409 raise error.Abort(_(b'invalid stage name: %s') % n)
1413 1410 showalways.update(opts[b'show_stage'])
1414 1411
1415 1412 tree = filesetlang.parse(expr)
1416 1413 for n, f in stages:
1417 1414 tree = f(tree)
1418 1415 if n in showalways:
1419 1416 if opts[b'show_stage'] or n != b'parsed':
1420 1417 ui.write(b"* %s:\n" % n)
1421 1418 ui.write(filesetlang.prettyformat(tree), b"\n")
1422 1419
1423 1420 files = set()
1424 1421 if opts[b'all_files']:
1425 1422 for r in repo:
1426 1423 c = repo[r]
1427 1424 files.update(c.files())
1428 1425 files.update(c.substate)
1429 1426 if opts[b'all_files'] or ctx.rev() is None:
1430 1427 wctx = repo[None]
1431 1428 files.update(
1432 1429 repo.dirstate.walk(
1433 1430 scmutil.matchall(repo),
1434 1431 subrepos=list(wctx.substate),
1435 1432 unknown=True,
1436 1433 ignored=True,
1437 1434 )
1438 1435 )
1439 1436 files.update(wctx.substate)
1440 1437 else:
1441 1438 files.update(ctx.files())
1442 1439 files.update(ctx.substate)
1443 1440
1444 1441 m = ctx.matchfileset(repo.getcwd(), expr)
1445 1442 if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
1446 1443 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
1447 1444 for f in sorted(files):
1448 1445 if not m(f):
1449 1446 continue
1450 1447 ui.write(b"%s\n" % f)
1451 1448
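For example (the fileset pattern is illustrative), printing every parse stage named above:

```
$ hg debugfileset -r . -p all 'added() and *.py'
```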
1452 1449
1453 1450 @command(
1454 1451 b"debug-repair-issue6528",
1455 1452 [
1456 1453 (
1457 1454 b'',
1458 1455 b'to-report',
1459 1456 b'',
1460 1457 _(b'build a report of affected revisions to this file'),
1461 1458 _(b'FILE'),
1462 1459 ),
1463 1460 (
1464 1461 b'',
1465 1462 b'from-report',
1466 1463 b'',
1467 1464 _(b'repair revisions listed in this report file'),
1468 1465 _(b'FILE'),
1469 1466 ),
1470 1467 (
1471 1468 b'',
1472 1469 b'paranoid',
1473 1470 False,
1474 1471 _(b'check that both detection methods do the same thing'),
1475 1472 ),
1476 1473 ]
1477 1474 + cmdutil.dryrunopts,
1478 1475 )
1479 1476 def debug_repair_issue6528(ui, repo, **opts):
1480 1477 """find affected revisions and repair them. See issue6528 for more details.
1481 1478
1482 1479 The `--to-report` and `--from-report` flags allow you to cache and reuse the
1483 1480 computation of affected revisions for a given repository across clones.
1484 1481 The report format is line-based (with empty lines ignored):
1485 1482
1486 1483 ```
1487 1484 <ascii-hex of the affected revision>,... <unencoded filelog index filename>
1488 1485 ```
1489 1486
1490 1487 There can be multiple broken revisions per filelog; they are separated by
1491 1488 a comma with no spaces. The only space is between the revision(s) and the
1492 1489 filename.
1493 1490
1494 1491 Note that this does *not* mean that this repairs future affected revisions;
1495 1492 that needs a separate fix at the exchange level that hasn't been written yet
1496 1493 (as of 5.9rc0).
1497 1494
1498 1495 There is a `--paranoid` flag to test that the fast implementation is correct
1499 1496 by checking it against the slow implementation. Since this matter is quite
1500 1497 urgent and testing every edge-case is probably quite costly, we use this
1501 1498 method to test on large repositories as a fuzzing method of sorts.
1502 1499 """
1503 1500 cmdutil.check_incompatible_arguments(
1504 1501 opts, 'to_report', ['from_report', 'dry_run']
1505 1502 )
1506 1503 dry_run = opts.get('dry_run')
1507 1504 to_report = opts.get('to_report')
1508 1505 from_report = opts.get('from_report')
1509 1506 paranoid = opts.get('paranoid')
1510 1507 # TODO maybe add filelog pattern and revision pattern parameters to help
1511 1508 # narrow down the search for users that know what they're looking for?
1512 1509
1513 1510 if requirements.REVLOGV1_REQUIREMENT not in repo.requirements:
1514 1511 msg = b"can only repair revlogv1 repositories, v2 is not affected"
1515 1512 raise error.Abort(_(msg))
1516 1513
1517 1514 rewrite.repair_issue6528(
1518 1515 ui,
1519 1516 repo,
1520 1517 dry_run=dry_run,
1521 1518 to_report=to_report,
1522 1519 from_report=from_report,
1523 1520 paranoid=paranoid,
1524 1521 )
1525 1522
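As an illustration of the report format described in the docstring (the node hashes are placeholders and the filelog index path is an assumed example):

```
<40-hex-node>,<40-hex-node> data/some/file.txt.i
```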
1526 1523
1527 1524 @command(b'debugformat', [] + cmdutil.formatteropts)
1528 1525 def debugformat(ui, repo, **opts):
1529 1526 """display format information about the current repository
1530 1527
1531 1528 Use --verbose to get extra information about current config value and
1532 1529 Mercurial default."""
1533 1530 opts = pycompat.byteskwargs(opts)
1534 1531 maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
1535 1532 maxvariantlength = max(len(b'format-variant'), maxvariantlength)
1536 1533
1537 1534 def makeformatname(name):
1538 1535 return b'%s:' + (b' ' * (maxvariantlength - len(name)))
1539 1536
1540 1537 fm = ui.formatter(b'debugformat', opts)
1541 1538 if fm.isplain():
1542 1539
1543 1540 def formatvalue(value):
1544 1541 if util.safehasattr(value, b'startswith'):
1545 1542 return value
1546 1543 if value:
1547 1544 return b'yes'
1548 1545 else:
1549 1546 return b'no'
1550 1547
1551 1548 else:
1552 1549 formatvalue = pycompat.identity
1553 1550
1554 1551 fm.plain(b'format-variant')
1555 1552 fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
1556 1553 fm.plain(b' repo')
1557 1554 if ui.verbose:
1558 1555 fm.plain(b' config default')
1559 1556 fm.plain(b'\n')
1560 1557 for fv in upgrade.allformatvariant:
1561 1558 fm.startitem()
1562 1559 repovalue = fv.fromrepo(repo)
1563 1560 configvalue = fv.fromconfig(repo)
1564 1561
1565 1562 if repovalue != configvalue:
1566 1563 namelabel = b'formatvariant.name.mismatchconfig'
1567 1564 repolabel = b'formatvariant.repo.mismatchconfig'
1568 1565 elif repovalue != fv.default:
1569 1566 namelabel = b'formatvariant.name.mismatchdefault'
1570 1567 repolabel = b'formatvariant.repo.mismatchdefault'
1571 1568 else:
1572 1569 namelabel = b'formatvariant.name.uptodate'
1573 1570 repolabel = b'formatvariant.repo.uptodate'
1574 1571
1575 1572 fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
1576 1573 fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
1577 1574 if fv.default != configvalue:
1578 1575 configlabel = b'formatvariant.config.special'
1579 1576 else:
1580 1577 configlabel = b'formatvariant.config.default'
1581 1578 fm.condwrite(
1582 1579 ui.verbose,
1583 1580 b'config',
1584 1581 b' %6s',
1585 1582 formatvalue(configvalue),
1586 1583 label=configlabel,
1587 1584 )
1588 1585 fm.condwrite(
1589 1586 ui.verbose,
1590 1587 b'default',
1591 1588 b' %7s',
1592 1589 formatvalue(fv.default),
1593 1590 label=b'formatvariant.default',
1594 1591 )
1595 1592 fm.plain(b'\n')
1596 1593 fm.end()
1597 1594
1598 1595
1599 1596 @command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
1600 1597 def debugfsinfo(ui, path=b"."):
1601 1598 """show information detected about current filesystem"""
1602 1599 ui.writenoi18n(b'path: %s\n' % path)
1603 1600 ui.writenoi18n(
1604 1601 b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
1605 1602 )
1606 1603 ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
1607 1604 ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
1608 1605 ui.writenoi18n(
1609 1606 b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
1610 1607 )
1611 1608 ui.writenoi18n(
1612 1609 b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
1613 1610 )
1614 1611 casesensitive = b'(unknown)'
1615 1612 try:
1616 1613 with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
1617 1614 casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
1618 1615 except OSError:
1619 1616 pass
1620 1617 ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1621 1618
1622 1619
1623 1620 @command(
1624 1621 b'debuggetbundle',
1625 1622 [
1626 1623 (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
1627 1624 (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
1628 1625 (
1629 1626 b't',
1630 1627 b'type',
1631 1628 b'bzip2',
1632 1629 _(b'bundle compression type to use'),
1633 1630 _(b'TYPE'),
1634 1631 ),
1635 1632 ],
1636 1633 _(b'REPO FILE [-H|-C ID]...'),
1637 1634 norepo=True,
1638 1635 )
1639 1636 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
1640 1637 """retrieves a bundle from a repo
1641 1638
1642 1639 Every ID must be a full-length hex node id string. Saves the bundle to the
1643 1640 given file.
1644 1641 """
1645 1642 opts = pycompat.byteskwargs(opts)
1646 1643 repo = hg.peer(ui, opts, repopath)
1647 1644 if not repo.capable(b'getbundle'):
1648 1645 raise error.Abort(b"getbundle() not supported by target repository")
1649 1646 args = {}
1650 1647 if common:
1651 1648 args['common'] = [bin(s) for s in common]
1652 1649 if head:
1653 1650 args['heads'] = [bin(s) for s in head]
1654 1651 # TODO: get desired bundlecaps from command line.
1655 1652 args['bundlecaps'] = None
1656 1653 bundle = repo.getbundle(b'debug', **args)
1657 1654
1658 1655 bundletype = opts.get(b'type', b'bzip2').lower()
1659 1656 btypes = {
1660 1657 b'none': b'HG10UN',
1661 1658 b'bzip2': b'HG10BZ',
1662 1659 b'gzip': b'HG10GZ',
1663 1660 b'bundle2': b'HG20',
1664 1661 }
1665 1662 bundletype = btypes.get(bundletype)
1666 1663 if bundletype not in bundle2.bundletypes:
1667 1664 raise error.Abort(_(b'unknown bundle type specified with --type'))
1668 1665 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1669 1666
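A hypothetical invocation (node ids are placeholders; every ID must be a full 40-character hex node per the docstring), saving a gzip-compressed bundle:

```
$ hg debuggetbundle http://example.com/repo out.hg -H <hex-head-id> -C <hex-common-id> -t gzip
```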
1670 1667
1671 1668 @command(b'debugignore', [], b'[FILE]')
1672 1669 def debugignore(ui, repo, *files, **opts):
1673 1670 """display the combined ignore pattern and information about ignored files
1674 1671
1675 1672 With no argument display the combined ignore pattern.
1676 1673
1677 1674 Given space-separated file names, show whether each given file is
1678 1675 ignored and, if so, the ignore rule (file and line number) that matched it.
1679 1676 """
1680 1677 ignore = repo.dirstate._ignore
1681 1678 if not files:
1682 1679 # Show all the patterns
1683 1680 ui.write(b"%s\n" % pycompat.byterepr(ignore))
1684 1681 else:
1685 1682 m = scmutil.match(repo[None], pats=files)
1686 1683 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1687 1684 for f in m.files():
1688 1685 nf = util.normpath(f)
1689 1686 ignored = None
1690 1687 ignoredata = None
1691 1688 if nf != b'.':
1692 1689 if ignore(nf):
1693 1690 ignored = nf
1694 1691 ignoredata = repo.dirstate._ignorefileandline(nf)
1695 1692 else:
1696 1693 for p in pathutil.finddirs(nf):
1697 1694 if ignore(p):
1698 1695 ignored = p
1699 1696 ignoredata = repo.dirstate._ignorefileandline(p)
1700 1697 break
1701 1698 if ignored:
1702 1699 if ignored == nf:
1703 1700 ui.write(_(b"%s is ignored\n") % uipathfn(f))
1704 1701 else:
1705 1702 ui.write(
1706 1703 _(
1707 1704 b"%s is ignored because of "
1708 1705 b"containing directory %s\n"
1709 1706 )
1710 1707 % (uipathfn(f), ignored)
1711 1708 )
1712 1709 ignorefile, lineno, line = ignoredata
1713 1710 ui.write(
1714 1711 _(b"(ignore rule in %s, line %d: '%s')\n")
1715 1712 % (ignorefile, lineno, line)
1716 1713 )
1717 1714 else:
1718 1715 ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1719 1716
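# An illustrative hg debugignore session, assuming an .hgignore whose
# first line is "*.orig":
#
#   $ hg debugignore foo.orig
#   foo.orig is ignored
#   (ignore rule in .hgignore, line 1: '*.orig')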
1720 1717
1721 1718 @command(
1722 1719 b'debugindex',
1723 1720 cmdutil.debugrevlogopts + cmdutil.formatteropts,
1724 1721 _(b'-c|-m|FILE'),
1725 1722 )
1726 1723 def debugindex(ui, repo, file_=None, **opts):
1727 1724 """dump index data for a storage primitive"""
1728 1725 opts = pycompat.byteskwargs(opts)
1729 1726 store = cmdutil.openstorage(repo, b'debugindex', file_, opts)
1730 1727
1731 1728 if ui.debugflag:
1732 1729 shortfn = hex
1733 1730 else:
1734 1731 shortfn = short
1735 1732
1736 1733 idlen = 12
1737 1734 for i in store:
1738 1735 idlen = len(shortfn(store.node(i)))
1739 1736 break
1740 1737
1741 1738 fm = ui.formatter(b'debugindex', opts)
1742 1739 fm.plain(
1743 1740 b' rev linkrev %s %s p2\n'
1744 1741 % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
1745 1742 )
1746 1743
1747 1744 for rev in store:
1748 1745 node = store.node(rev)
1749 1746 parents = store.parents(node)
1750 1747
1751 1748 fm.startitem()
1752 1749 fm.write(b'rev', b'%6d ', rev)
1753 1750 fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
1754 1751 fm.write(b'node', b'%s ', shortfn(node))
1755 1752 fm.write(b'p1', b'%s ', shortfn(parents[0]))
1756 1753 fm.write(b'p2', b'%s', shortfn(parents[1]))
1757 1754 fm.plain(b'\n')
1758 1755
1759 1756 fm.end()
1760 1757
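# Roughly the shape of hg debugindex output (all values illustrative):
#
#   $ hg debugindex -c
#      rev linkrev nodeid       p1           p2
#        0       0 1ea73414a91b 000000000000 000000000000
#        1       1 66f7d451a68b 1ea73414a91b 000000000000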
1761 1758
1762 1759 @command(
1763 1760 b'debugindexdot',
1764 1761 cmdutil.debugrevlogopts,
1765 1762 _(b'-c|-m|FILE'),
1766 1763 optionalrepo=True,
1767 1764 )
1768 1765 def debugindexdot(ui, repo, file_=None, **opts):
1769 1766 """dump an index DAG as a graphviz dot file"""
1770 1767 opts = pycompat.byteskwargs(opts)
1771 1768 r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
1772 1769 ui.writenoi18n(b"digraph G {\n")
1773 1770 for i in r:
1774 1771 node = r.node(i)
1775 1772 pp = r.parents(node)
1776 1773 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
1777 1774 if pp[1] != repo.nullid:
1778 1775 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
1779 1776 ui.write(b"}\n")
1780 1777
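# The output is a Graphviz "dot" graph. Assuming the Graphviz tools are
# installed, it can be rendered with, for example:
#
#   $ hg debugindexdot -c > index.dot
#   $ dot -Tpng index.dot -o index.png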
1781 1778
1782 1779 @command(b'debugindexstats', [])
1783 1780 def debugindexstats(ui, repo):
1784 1781 """show stats related to the changelog index"""
1785 1782 repo.changelog.shortest(repo.nullid, 1)
1786 1783 index = repo.changelog.index
1787 1784 if not util.safehasattr(index, b'stats'):
1788 1785 raise error.Abort(_(b'debugindexstats only works with native code'))
1789 1786 for k, v in sorted(index.stats().items()):
1790 1787 ui.write(b'%s: %d\n' % (k, v))
1791 1788
1792 1789
1793 1790 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1794 1791 def debuginstall(ui, **opts):
1795 1792 """test Mercurial installation
1796 1793
1797 1794 Returns 0 on success.
1798 1795 """
1799 1796 opts = pycompat.byteskwargs(opts)
1800 1797
1801 1798 problems = 0
1802 1799
1803 1800 fm = ui.formatter(b'debuginstall', opts)
1804 1801 fm.startitem()
1805 1802
1806 1803 # encoding might be unknown or wrong. don't translate these messages.
1807 1804 fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
1808 1805 err = None
1809 1806 try:
1810 1807 codecs.lookup(pycompat.sysstr(encoding.encoding))
1811 1808 except LookupError as inst:
1812 1809 err = stringutil.forcebytestr(inst)
1813 1810 problems += 1
1814 1811 fm.condwrite(
1815 1812 err,
1816 1813 b'encodingerror',
1817 1814 b" %s\n (check that your locale is properly set)\n",
1818 1815 err,
1819 1816 )
1820 1817
1821 1818 # Python
1822 1819 pythonlib = None
1823 1820 if util.safehasattr(os, '__file__'):
1824 1821 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1825 1822 elif getattr(sys, 'oxidized', False):
1826 1823 pythonlib = pycompat.sysexecutable
1827 1824
1828 1825 fm.write(
1829 1826 b'pythonexe',
1830 1827 _(b"checking Python executable (%s)\n"),
1831 1828 pycompat.sysexecutable or _(b"unknown"),
1832 1829 )
1833 1830 fm.write(
1834 1831 b'pythonimplementation',
1835 1832 _(b"checking Python implementation (%s)\n"),
1836 1833 pycompat.sysbytes(platform.python_implementation()),
1837 1834 )
1838 1835 fm.write(
1839 1836 b'pythonver',
1840 1837 _(b"checking Python version (%s)\n"),
1841 1838 (b"%d.%d.%d" % sys.version_info[:3]),
1842 1839 )
1843 1840 fm.write(
1844 1841 b'pythonlib',
1845 1842 _(b"checking Python lib (%s)...\n"),
1846 1843 pythonlib or _(b"unknown"),
1847 1844 )
1848 1845
1849 1846 try:
1850 1847 from . import rustext # pytype: disable=import-error
1851 1848
1852 1849 rustext.__doc__ # trigger lazy import
1853 1850 except ImportError:
1854 1851 rustext = None
1855 1852
1856 1853 security = set(sslutil.supportedprotocols)
1857 1854 if sslutil.hassni:
1858 1855 security.add(b'sni')
1859 1856
1860 1857 fm.write(
1861 1858 b'pythonsecurity',
1862 1859 _(b"checking Python security support (%s)\n"),
1863 1860 fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
1864 1861 )
1865 1862
1866 1863 # These are warnings, not errors. So don't increment problem count. This
1867 1864 # may change in the future.
1868 1865 if b'tls1.2' not in security:
1869 1866 fm.plain(
1870 1867 _(
1871 1868 b' TLS 1.2 not supported by Python install; '
1872 1869 b'network connections lack modern security\n'
1873 1870 )
1874 1871 )
1875 1872 if b'sni' not in security:
1876 1873 fm.plain(
1877 1874 _(
1878 1875 b' SNI not supported by Python install; may have '
1879 1876 b'connectivity issues with some servers\n'
1880 1877 )
1881 1878 )
1882 1879
1883 1880 fm.plain(
1884 1881 _(
1885 1882 b"checking Rust extensions (%s)\n"
1886 1883 % (b'missing' if rustext is None else b'installed')
1887 1884 ),
1888 1885 )
1889 1886
1890 1887 # TODO print CA cert info
1891 1888
1892 1889 # hg version
1893 1890 hgver = util.version()
1894 1891 fm.write(
1895 1892 b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
1896 1893 )
1897 1894 fm.write(
1898 1895 b'hgverextra',
1899 1896 _(b"checking Mercurial custom build (%s)\n"),
1900 1897 b'+'.join(hgver.split(b'+')[1:]),
1901 1898 )
1902 1899
1903 1900 # compiled modules
1904 1901 hgmodules = None
1905 1902 if util.safehasattr(sys.modules[__name__], '__file__'):
1906 1903 hgmodules = os.path.dirname(pycompat.fsencode(__file__))
1907 1904 elif getattr(sys, 'oxidized', False):
1908 1905 hgmodules = pycompat.sysexecutable
1909 1906
1910 1907 fm.write(
1911 1908 b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
1912 1909 )
1913 1910 fm.write(
1914 1911 b'hgmodules',
1915 1912 _(b"checking installed modules (%s)...\n"),
1916 1913 hgmodules or _(b"unknown"),
1917 1914 )
1918 1915
1919 1916 rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
1920 1917 rustext = rustandc # for now, that's the only case
1921 1918 cext = policy.policy in (b'c', b'allow') or rustandc
1922 1919 nopure = cext or rustext
1923 1920 if nopure:
1924 1921 err = None
1925 1922 try:
1926 1923 if cext:
1927 1924 from .cext import ( # pytype: disable=import-error
1928 1925 base85,
1929 1926 bdiff,
1930 1927 mpatch,
1931 1928 osutil,
1932 1929 )
1933 1930
1934 1931 # quiet pyflakes
1935 1932 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1936 1933 if rustext:
1937 1934 from .rustext import ( # pytype: disable=import-error
1938 1935 ancestor,
1939 1936 dirstate,
1940 1937 )
1941 1938
1942 1939 dir(ancestor), dir(dirstate) # quiet pyflakes
1943 1940 except Exception as inst:
1944 1941 err = stringutil.forcebytestr(inst)
1945 1942 problems += 1
1946 1943 fm.condwrite(err, b'extensionserror', b" %s\n", err)
1947 1944
1948 1945 compengines = util.compengines._engines.values()
1949 1946 fm.write(
1950 1947 b'compengines',
1951 1948 _(b'checking registered compression engines (%s)\n'),
1952 1949 fm.formatlist(
1953 1950 sorted(e.name() for e in compengines),
1954 1951 name=b'compengine',
1955 1952 fmt=b'%s',
1956 1953 sep=b', ',
1957 1954 ),
1958 1955 )
1959 1956 fm.write(
1960 1957 b'compenginesavail',
1961 1958 _(b'checking available compression engines (%s)\n'),
1962 1959 fm.formatlist(
1963 1960 sorted(e.name() for e in compengines if e.available()),
1964 1961 name=b'compengine',
1965 1962 fmt=b'%s',
1966 1963 sep=b', ',
1967 1964 ),
1968 1965 )
1969 1966 wirecompengines = compression.compengines.supportedwireengines(
1970 1967 compression.SERVERROLE
1971 1968 )
1972 1969 fm.write(
1973 1970 b'compenginesserver',
1974 1971 _(
1975 1972 b'checking available compression engines '
1976 1973 b'for wire protocol (%s)\n'
1977 1974 ),
1978 1975 fm.formatlist(
1979 1976 [e.name() for e in wirecompengines if e.wireprotosupport()],
1980 1977 name=b'compengine',
1981 1978 fmt=b'%s',
1982 1979 sep=b', ',
1983 1980 ),
1984 1981 )
1985 1982 re2 = b'missing'
1986 1983 if util._re2:
1987 1984 re2 = b'available'
1988 1985 fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
1989 1986 fm.data(re2=bool(util._re2))
1990 1987
1991 1988 # templates
1992 1989 p = templater.templatedir()
1993 1990 fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
1994 1991 fm.condwrite(not p, b'', _(b" no template directories found\n"))
1995 1992 if p:
1996 1993 (m, fp) = templater.try_open_template(b"map-cmdline.default")
1997 1994 if m:
1998 1995 # template found, check if it is working
1999 1996 err = None
2000 1997 try:
2001 1998 templater.templater.frommapfile(m)
2002 1999 except Exception as inst:
2003 2000 err = stringutil.forcebytestr(inst)
2004 2001 p = None
2005 2002 fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
2006 2003 else:
2007 2004 p = None
2008 2005 fm.condwrite(
2009 2006 p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
2010 2007 )
2011 2008 fm.condwrite(
2012 2009 not m,
2013 2010 b'defaulttemplatenotfound',
2014 2011 _(b" template '%s' not found\n"),
2015 2012 b"default",
2016 2013 )
2017 2014 if not p:
2018 2015 problems += 1
2019 2016 fm.condwrite(
2020 2017 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
2021 2018 )
2022 2019
2023 2020 # editor
2024 2021 editor = ui.geteditor()
2025 2022 editor = util.expandpath(editor)
2026 2023 editorbin = procutil.shellsplit(editor)[0]
2027 2024 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
2028 2025 cmdpath = procutil.findexe(editorbin)
2029 2026 fm.condwrite(
2030 2027 not cmdpath and editor == b'vi',
2031 2028 b'vinotfound',
2032 2029 _(
2033 2030 b" No commit editor set and can't find %s in PATH\n"
2034 2031 b" (specify a commit editor in your configuration"
2035 2032 b" file)\n"
2036 2033 ),
2037 2034 not cmdpath and editor == b'vi' and editorbin,
2038 2035 )
2039 2036 fm.condwrite(
2040 2037 not cmdpath and editor != b'vi',
2041 2038 b'editornotfound',
2042 2039 _(
2043 2040 b" Can't find editor '%s' in PATH\n"
2044 2041 b" (specify a commit editor in your configuration"
2045 2042 b" file)\n"
2046 2043 ),
2047 2044 not cmdpath and editorbin,
2048 2045 )
2049 2046 if not cmdpath and editor != b'vi':
2050 2047 problems += 1
2051 2048
2052 2049 # check username
2053 2050 username = None
2054 2051 err = None
2055 2052 try:
2056 2053 username = ui.username()
2057 2054 except error.Abort as e:
2058 2055 err = e.message
2059 2056 problems += 1
2060 2057
2061 2058 fm.condwrite(
2062 2059 username, b'username', _(b"checking username (%s)\n"), username
2063 2060 )
2064 2061 fm.condwrite(
2065 2062 err,
2066 2063 b'usernameerror',
2067 2064 _(
2068 2065 b"checking username...\n %s\n"
2069 2066 b" (specify a username in your configuration file)\n"
2070 2067 ),
2071 2068 err,
2072 2069 )
2073 2070
2074 2071 for name, mod in extensions.extensions():
2075 2072 handler = getattr(mod, 'debuginstall', None)
2076 2073 if handler is not None:
2077 2074 problems += handler(ui, fm)
2078 2075
2079 2076 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
2080 2077 if not problems:
2081 2078 fm.data(problems=problems)
2082 2079 fm.condwrite(
2083 2080 problems,
2084 2081 b'problems',
2085 2082 _(b"%d problems detected, please check your install!\n"),
2086 2083 problems,
2087 2084 )
2088 2085 fm.end()
2089 2086
2090 2087 return problems
2091 2088
2092 2089
2093 2090 @command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
2094 2091 def debugknown(ui, repopath, *ids, **opts):
2095 2092 """test whether node ids are known to a repo
2096 2093
2097 2094 Every ID must be a full-length hex node id string. Returns a list of 0s
2098 2095 and 1s indicating unknown/known.
2099 2096 """
2100 2097 opts = pycompat.byteskwargs(opts)
2101 2098 repo = hg.peer(ui, opts, repopath)
2102 2099 if not repo.capable(b'known'):
2103 2100 raise error.Abort(b"known() not supported by target repository")
2104 2101 flags = repo.known([bin(s) for s in ids])
2105 2102 ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))
2106 2103
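# An illustrative hg debugknown session (URL and node ID are placeholders);
# the output has one digit per queried ID, 1 for known and 0 for unknown:
#
#   $ hg debugknown http://example.com/repo \
#       1234567890123456789012345678901234567890
#   1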
2107 2104
2108 2105 @command(b'debuglabelcomplete', [], _(b'LABEL...'))
2109 2106 def debuglabelcomplete(ui, repo, *args):
2110 2107 '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
2111 2108 debugnamecomplete(ui, repo, *args)
2112 2109
2113 2110
2114 2111 @command(
2115 2112 b'debuglocks',
2116 2113 [
2117 2114 (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
2118 2115 (
2119 2116 b'W',
2120 2117 b'force-free-wlock',
2121 2118 None,
2122 2119 _(b'free the working state lock (DANGEROUS)'),
2123 2120 ),
2124 2121 (b's', b'set-lock', None, _(b'set the store lock until stopped')),
2125 2122 (
2126 2123 b'S',
2127 2124 b'set-wlock',
2128 2125 None,
2129 2126 _(b'set the working state lock until stopped'),
2130 2127 ),
2131 2128 ],
2132 2129 _(b'[OPTION]...'),
2133 2130 )
2134 2131 def debuglocks(ui, repo, **opts):
2135 2132 """show or modify state of locks
2136 2133
2137 2134 By default, this command will show which locks are held. This
2138 2135 includes the user and process holding the lock, the amount of time
2139 2136 the lock has been held, and the machine name where the process is
2140 2137 running if it's not local.
2141 2138
2142 2139 Locks protect the integrity of Mercurial's data, so they should be
2143 2140 treated with care. System crashes or other interruptions may cause
2144 2141 locks to not be properly released, though Mercurial will usually
2145 2142 detect and remove such stale locks automatically.
2146 2143
2147 2144 However, detecting stale locks may not always be possible (for
2148 2145 instance, on a shared filesystem). Removing locks may also be
2149 2146 blocked by filesystem permissions.
2150 2147
2151 2148 Setting a lock will prevent other commands from changing the data.
2152 2149 The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
2153 2150 The set locks are removed when the command exits.
2154 2151
2155 2152 Returns 0 if no locks are held.
2156 2153
2157 2154 """
2158 2155
2159 2156 if opts.get('force_free_lock'):
2160 2157 repo.svfs.unlink(b'lock')
2161 2158 if opts.get('force_free_wlock'):
2162 2159 repo.vfs.unlink(b'wlock')
2163 2160 if opts.get('force_free_lock') or opts.get('force_free_wlock'):
2164 2161 return 0
2165 2162
2166 2163 locks = []
2167 2164 try:
2168 2165 if opts.get('set_wlock'):
2169 2166 try:
2170 2167 locks.append(repo.wlock(False))
2171 2168 except error.LockHeld:
2172 2169 raise error.Abort(_(b'wlock is already held'))
2173 2170 if opts.get('set_lock'):
2174 2171 try:
2175 2172 locks.append(repo.lock(False))
2176 2173 except error.LockHeld:
2177 2174 raise error.Abort(_(b'lock is already held'))
2178 2175 if len(locks):
2179 2176 ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
2180 2177 return 0
2181 2178 finally:
2182 2179 release(*locks)
2183 2180
2184 2181 now = time.time()
2185 2182 held = 0
2186 2183
2187 2184 def report(vfs, name, method):
2188 2185 # this causes stale locks to get reaped for more accurate reporting
2189 2186 try:
2190 2187 l = method(False)
2191 2188 except error.LockHeld:
2192 2189 l = None
2193 2190
2194 2191 if l:
2195 2192 l.release()
2196 2193 else:
2197 2194 try:
2198 2195 st = vfs.lstat(name)
2199 2196 age = now - st[stat.ST_MTIME]
2200 2197 user = util.username(st.st_uid)
2201 2198 locker = vfs.readlock(name)
2202 2199 if b":" in locker:
2203 2200 host, pid = locker.split(b':')
2204 2201 if host == socket.gethostname():
2205 2202 locker = b'user %s, process %s' % (user or b'None', pid)
2206 2203 else:
2207 2204 locker = b'user %s, process %s, host %s' % (
2208 2205 user or b'None',
2209 2206 pid,
2210 2207 host,
2211 2208 )
2212 2209 ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
2213 2210 return 1
2214 2211 except OSError as e:
2215 2212 if e.errno != errno.ENOENT:
2216 2213 raise
2217 2214
2218 2215 ui.writenoi18n(b"%-6s free\n" % (name + b":"))
2219 2216 return 0
2220 2217
2221 2218 held += report(repo.svfs, b"lock", repo.lock)
2222 2219 held += report(repo.vfs, b"wlock", repo.wlock)
2223 2220
2224 2221 return held
2225 2222
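# Example of the default hg debuglocks report (user, process ID and lock
# age are illustrative):
#
#   $ hg debuglocks
#   lock:  free
#   wlock: user alice, process 12345 (3s)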
2226 2223
2227 2224 @command(
2228 2225 b'debugmanifestfulltextcache',
2229 2226 [
2230 2227 (b'', b'clear', False, _(b'clear the cache')),
2231 2228 (
2232 2229 b'a',
2233 2230 b'add',
2234 2231 [],
2235 2232 _(b'add the given manifest nodes to the cache'),
2236 2233 _(b'NODE'),
2237 2234 ),
2238 2235 ],
2239 2236 b'',
2240 2237 )
2241 2238 def debugmanifestfulltextcache(ui, repo, add=(), **opts):
2242 2239 """show, clear or amend the contents of the manifest fulltext cache"""
2243 2240
2244 2241 def getcache():
2245 2242 r = repo.manifestlog.getstorage(b'')
2246 2243 try:
2247 2244 return r._fulltextcache
2248 2245 except AttributeError:
2249 2246 msg = _(
2250 2247 b"Current revlog implementation doesn't appear to have a "
2251 2248 b"manifest fulltext cache\n"
2252 2249 )
2253 2250 raise error.Abort(msg)
2254 2251
2255 2252 if opts.get('clear'):
2256 2253 with repo.wlock():
2257 2254 cache = getcache()
2258 2255 cache.clear(clear_persisted_data=True)
2259 2256 return
2260 2257
2261 2258 if add:
2262 2259 with repo.wlock():
2263 2260 m = repo.manifestlog
2264 2261 store = m.getstorage(b'')
2265 2262 for n in add:
2266 2263 try:
2267 2264 manifest = m[store.lookup(n)]
2268 2265 except error.LookupError as e:
2269 2266 raise error.Abort(
2270 2267 bytes(e), hint=b"Check your manifest node id"
2271 2268 )
2272 2269 manifest.read() # stores revision in cache too
2273 2270 return
2274 2271
2275 2272 cache = getcache()
2276 2273 if not len(cache):
2277 2274 ui.write(_(b'cache empty\n'))
2278 2275 else:
2279 2276 ui.write(
2280 2277 _(
2281 2278 b'cache contains %d manifest entries, in order of most to '
2282 2279 b'least recent:\n'
2283 2280 )
2284 2281 % (len(cache),)
2285 2282 )
2286 2283 totalsize = 0
2287 2284 for nodeid in cache:
2288 2285 # Use cache.peek to not update the LRU order
2289 2286 data = cache.peek(nodeid)
2290 2287 size = len(data)
2291 2288 totalsize += size + 24 # 20 bytes nodeid, 4 bytes size
2292 2289 ui.write(
2293 2290 _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
2294 2291 )
2295 2292 ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
2296 2293 ui.write(
2297 2294 _(b'total cache data size %s, on-disk %s\n')
2298 2295 % (util.bytecount(totalsize), util.bytecount(ondisk))
2299 2296 )
2300 2297
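# Example of the hg debugmanifestfulltextcache summary (node IDs and
# sizes are made up):
#
#   $ hg debugmanifestfulltextcache
#   cache contains 2 manifest entries, in order of most to least recent:
#   id: 1111111111111111111111111111111111111111, size 1.01 KB
#   id: 2222222222222222222222222222222222222222, size 985 bytes
#   total cache data size 2.02 KB, on-disk 2.02 KB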
2301 2298
2302 2299 @command(b'debugmergestate', [] + cmdutil.templateopts, b'')
2303 2300 def debugmergestate(ui, repo, *args, **opts):
2304 2301 """print merge state
2305 2302
2306 2303 Use --verbose to print out information about whether v1 or v2 merge state
2307 2304 was chosen."""
2308 2305
2309 2306 if ui.verbose:
2310 2307 ms = mergestatemod.mergestate(repo)
2311 2308
2312 2309 # sort so that reasonable information is on top
2313 2310 v1records = ms._readrecordsv1()
2314 2311 v2records = ms._readrecordsv2()
2315 2312
2316 2313 if not v1records and not v2records:
2317 2314 pass
2318 2315 elif not v2records:
2319 2316 ui.writenoi18n(b'no version 2 merge state\n')
2320 2317 elif ms._v1v2match(v1records, v2records):
2321 2318 ui.writenoi18n(b'v1 and v2 states match: using v2\n')
2322 2319 else:
2323 2320 ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')
2324 2321
2325 2322 opts = pycompat.byteskwargs(opts)
2326 2323 if not opts[b'template']:
2327 2324 opts[b'template'] = (
2328 2325 b'{if(commits, "", "no merge state found\n")}'
2329 2326 b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
2330 2327 b'{files % "file: {path} (state \\"{state}\\")\n'
2331 2328 b'{if(local_path, "'
2332 2329 b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
2333 2330 b' ancestor path: {ancestor_path} (node {ancestor_node})\n'
2334 2331 b' other path: {other_path} (node {other_node})\n'
2335 2332 b'")}'
2336 2333 b'{if(rename_side, "'
2337 2334 b' rename side: {rename_side}\n'
2338 2335 b' renamed path: {renamed_path}\n'
2339 2336 b'")}'
2340 2337 b'{extras % " extra: {key} = {value}\n"}'
2341 2338 b'"}'
2342 2339 b'{extras % "extra: {file} ({key} = {value})\n"}'
2343 2340 )
2344 2341
2345 2342 ms = mergestatemod.mergestate.read(repo)
2346 2343
2347 2344 fm = ui.formatter(b'debugmergestate', opts)
2348 2345 fm.startitem()
2349 2346
2350 2347 fm_commits = fm.nested(b'commits')
2351 2348 if ms.active():
2352 2349 for name, node, label_index in (
2353 2350 (b'local', ms.local, 0),
2354 2351 (b'other', ms.other, 1),
2355 2352 ):
2356 2353 fm_commits.startitem()
2357 2354 fm_commits.data(name=name)
2358 2355 fm_commits.data(node=hex(node))
2359 2356 if ms._labels and len(ms._labels) > label_index:
2360 2357 fm_commits.data(label=ms._labels[label_index])
2361 2358 fm_commits.end()
2362 2359
2363 2360 fm_files = fm.nested(b'files')
2364 2361 if ms.active():
2365 2362 for f in ms:
2366 2363 fm_files.startitem()
2367 2364 fm_files.data(path=f)
2368 2365 state = ms._state[f]
2369 2366 fm_files.data(state=state[0])
2370 2367 if state[0] in (
2371 2368 mergestatemod.MERGE_RECORD_UNRESOLVED,
2372 2369 mergestatemod.MERGE_RECORD_RESOLVED,
2373 2370 ):
2374 2371 fm_files.data(local_key=state[1])
2375 2372 fm_files.data(local_path=state[2])
2376 2373 fm_files.data(ancestor_path=state[3])
2377 2374 fm_files.data(ancestor_node=state[4])
2378 2375 fm_files.data(other_path=state[5])
2379 2376 fm_files.data(other_node=state[6])
2380 2377 fm_files.data(local_flags=state[7])
2381 2378 elif state[0] in (
2382 2379 mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
2383 2380 mergestatemod.MERGE_RECORD_RESOLVED_PATH,
2384 2381 ):
2385 2382 fm_files.data(renamed_path=state[1])
2386 2383 fm_files.data(rename_side=state[2])
2387 2384 fm_extras = fm_files.nested(b'extras')
2388 2385 for k, v in sorted(ms.extras(f).items()):
2389 2386 fm_extras.startitem()
2390 2387 fm_extras.data(key=k)
2391 2388 fm_extras.data(value=v)
2392 2389 fm_extras.end()
2393 2390
2394 2391 fm_files.end()
2395 2392
2396 2393 fm_extras = fm.nested(b'extras')
2397 2394 for f, d in sorted(pycompat.iteritems(ms.allextras())):
2398 2395 if f in ms:
2399 2396 # If the file is in the mergestate, we have already processed its extras
2400 2397 continue
2401 2398 for k, v in pycompat.iteritems(d):
2402 2399 fm_extras.startitem()
2403 2400 fm_extras.data(file=f)
2404 2401 fm_extras.data(key=k)
2405 2402 fm_extras.data(value=v)
2406 2403 fm_extras.end()
2407 2404
2408 2405 fm.end()
2409 2406
2410 2407
2411 2408 @command(b'debugnamecomplete', [], _(b'NAME...'))
2412 2409 def debugnamecomplete(ui, repo, *args):
2413 2410 '''complete "names" - tags, open branch names, bookmark names'''
2414 2411
2415 2412 names = set()
2416 2413 # since we previously only listed open branches, we will handle that
2417 2414 # specially (after this for loop)
2418 2415 for name, ns in pycompat.iteritems(repo.names):
2419 2416 if name != b'branches':
2420 2417 names.update(ns.listnames(repo))
2421 2418 names.update(
2422 2419 tag
2423 2420 for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
2424 2421 if not closed
2425 2422 )
2426 2423 completions = set()
2427 2424 if not args:
2428 2425 args = [b'']
2429 2426 for a in args:
2430 2427 completions.update(n for n in names if n.startswith(a))
2431 2428 ui.write(b'\n'.join(sorted(completions)))
2432 2429 ui.write(b'\n')
2433 2430
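# An illustrative hg debugnamecomplete session, assuming bookmarks
# "feature-x" and "feature-y" exist:
#
#   $ hg debugnamecomplete feat
#   feature-x
#   feature-y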
2434 2431
2435 2432 @command(
2436 2433 b'debugnodemap',
2437 2434 [
2438 2435 (
2439 2436 b'',
2440 2437 b'dump-new',
2441 2438 False,
2442 2439 _(b'write a (new) persistent binary nodemap on stdout'),
2443 2440 ),
2444 2441 (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
2445 2442 (
2446 2443 b'',
2447 2444 b'check',
2448 2445 False,
2449 2446 _(b'check that the data on disk are correct.'),
2450 2447 ),
2451 2448 (
2452 2449 b'',
2453 2450 b'metadata',
2454 2451 False,
2455 2452 _(b'display the on-disk metadata for the nodemap'),
2456 2453 ),
2457 2454 ],
2458 2455 )
2459 2456 def debugnodemap(ui, repo, **opts):
2460 2457 """write and inspect on disk nodemap"""
2461 2458 if opts['dump_new']:
2462 2459 unfi = repo.unfiltered()
2463 2460 cl = unfi.changelog
2464 2461 if util.safehasattr(cl.index, "nodemap_data_all"):
2465 2462 data = cl.index.nodemap_data_all()
2466 2463 else:
2467 2464 data = nodemap.persistent_data(cl.index)
2468 2465 ui.write(data)
2469 2466 elif opts['dump_disk']:
2470 2467 unfi = repo.unfiltered()
2471 2468 cl = unfi.changelog
2472 2469 nm_data = nodemap.persisted_data(cl)
2473 2470 if nm_data is not None:
2474 2471 docket, data = nm_data
2475 2472 ui.write(data[:])
2476 2473 elif opts['check']:
2477 2474 unfi = repo.unfiltered()
2478 2475 cl = unfi.changelog
2479 2476 nm_data = nodemap.persisted_data(cl)
2480 2477 if nm_data is not None:
2481 2478 docket, data = nm_data
2482 2479 return nodemap.check_data(ui, cl.index, data)
2483 2480 elif opts['metadata']:
2484 2481 unfi = repo.unfiltered()
2485 2482 cl = unfi.changelog
2486 2483 nm_data = nodemap.persisted_data(cl)
2487 2484 if nm_data is not None:
2488 2485 docket, data = nm_data
2489 2486 ui.write((b"uid: %s\n") % docket.uid)
2490 2487 ui.write((b"tip-rev: %d\n") % docket.tip_rev)
2491 2488 ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
2492 2489 ui.write((b"data-length: %d\n") % docket.data_length)
2493 2490 ui.write((b"data-unused: %d\n") % docket.data_unused)
2494 2491 unused_perc = docket.data_unused * 100.0 / docket.data_length
2495 2492 ui.write((b"data-unused: %2.3f%%\n") % unused_perc)
2496 2493
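# Example of hg debugnodemap --metadata output (every value here is
# illustrative):
#
#   $ hg debugnodemap --metadata
#   uid: 87649eba
#   tip-rev: 5002
#   tip-node: 1234567890123456789012345678901234567890
#   data-length: 121088
#   data-unused: 0
#   data-unused: 0.000%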
2497 2494
2498 2495 @command(
2499 2496 b'debugobsolete',
2500 2497 [
2501 2498 (b'', b'flags', 0, _(b'markers flag')),
2502 2499 (
2503 2500 b'',
2504 2501 b'record-parents',
2505 2502 False,
2506 2503 _(b'record parent information for the precursor'),
2507 2504 ),
2508 2505 (b'r', b'rev', [], _(b'display markers relevant to REV')),
2509 2506 (
2510 2507 b'',
2511 2508 b'exclusive',
2512 2509 False,
2513 2510 _(b'restrict display to markers only relevant to REV'),
2514 2511 ),
2515 2512 (b'', b'index', False, _(b'display index of the marker')),
2516 2513 (b'', b'delete', [], _(b'delete markers specified by indices')),
2517 2514 ]
2518 2515 + cmdutil.commitopts2
2519 2516 + cmdutil.formatteropts,
2520 2517 _(b'[OBSOLETED [REPLACEMENT ...]]'),
2521 2518 )
2522 2519 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
2523 2520 """create arbitrary obsolete marker
2524 2521
2525 2522 With no arguments, displays the list of obsolescence markers."""
2526 2523
2527 2524 opts = pycompat.byteskwargs(opts)
2528 2525
2529 2526 def parsenodeid(s):
2530 2527 try:
2531 2528 # We do not use revsingle/revrange functions here to accept
2532 2529 # arbitrary node identifiers, possibly not present in the
2533 2530 # local repository.
2534 2531 n = bin(s)
2535 2532 if len(n) != repo.nodeconstants.nodelen:
2536 2533 raise TypeError()
2537 2534 return n
2538 2535 except TypeError:
2539 2536 raise error.InputError(
2540 2537 b'changeset references must be full hexadecimal '
2541 2538 b'node identifiers'
2542 2539 )
2543 2540
2544 2541 if opts.get(b'delete'):
2545 2542 indices = []
2546 2543 for v in opts.get(b'delete'):
2547 2544 try:
2548 2545 indices.append(int(v))
2549 2546 except ValueError:
2550 2547 raise error.InputError(
2551 2548 _(b'invalid index value: %r') % v,
2552 2549 hint=_(b'use integers for indices'),
2553 2550 )
2554 2551
2555 2552 if repo.currenttransaction():
2556 2553 raise error.Abort(
2557 2554 _(b'cannot delete obsmarkers in the middle of a transaction.')
2558 2555 )
2559 2556
2560 2557 with repo.lock():
2561 2558 n = repair.deleteobsmarkers(repo.obsstore, indices)
2562 2559 ui.write(_(b'deleted %i obsolescence markers\n') % n)
2563 2560
2564 2561 return
2565 2562
2566 2563 if precursor is not None:
2567 2564 if opts[b'rev']:
2568 2565 raise error.InputError(
2569 2566 b'cannot select revision when creating marker'
2570 2567 )
2571 2568 metadata = {}
2572 2569 metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
2573 2570 succs = tuple(parsenodeid(succ) for succ in successors)
2574 2571 l = repo.lock()
2575 2572 try:
2576 2573 tr = repo.transaction(b'debugobsolete')
2577 2574 try:
2578 2575 date = opts.get(b'date')
2579 2576 if date:
2580 2577 date = dateutil.parsedate(date)
2581 2578 else:
2582 2579 date = None
2583 2580 prec = parsenodeid(precursor)
2584 2581 parents = None
2585 2582 if opts[b'record_parents']:
2586 2583 if prec not in repo.unfiltered():
2587 2584 raise error.Abort(
2588 2585 b'cannot use --record-parents on '
2589 2586 b'unknown changesets'
2590 2587 )
2591 2588 parents = repo.unfiltered()[prec].parents()
2592 2589 parents = tuple(p.node() for p in parents)
2593 2590 repo.obsstore.create(
2594 2591 tr,
2595 2592 prec,
2596 2593 succs,
2597 2594 opts[b'flags'],
2598 2595 parents=parents,
2599 2596 date=date,
2600 2597 metadata=metadata,
2601 2598 ui=ui,
2602 2599 )
2603 2600 tr.close()
2604 2601 except ValueError as exc:
2605 2602 raise error.Abort(
2606 2603 _(b'bad obsmarker input: %s') % stringutil.forcebytestr(exc)
2607 2604 )
2608 2605 finally:
2609 2606 tr.release()
2610 2607 finally:
2611 2608 l.release()
2612 2609 else:
2613 2610 if opts[b'rev']:
2614 2611 revs = scmutil.revrange(repo, opts[b'rev'])
2615 2612 nodes = [repo[r].node() for r in revs]
2616 2613 markers = list(
2617 2614 obsutil.getmarkers(
2618 2615 repo, nodes=nodes, exclusive=opts[b'exclusive']
2619 2616 )
2620 2617 )
2621 2618 markers.sort(key=lambda x: x._data)
2622 2619 else:
2623 2620 markers = obsutil.getmarkers(repo)
2624 2621
2625 2622 markerstoiter = markers
2626 2623 isrelevant = lambda m: True
2627 2624 if opts.get(b'rev') and opts.get(b'index'):
2628 2625 markerstoiter = obsutil.getmarkers(repo)
2629 2626 markerset = set(markers)
2630 2627 isrelevant = lambda m: m in markerset
2631 2628
2632 2629 fm = ui.formatter(b'debugobsolete', opts)
2633 2630 for i, m in enumerate(markerstoiter):
2634 2631 if not isrelevant(m):
2635 2632 # marker can be irrelevant when we're iterating over a set
2636 2633 # of markers (markerstoiter) which is bigger than the set
2637 2634 # of markers we want to display (markers)
2638 2635 # this can happen if both --index and --rev options are
2639 2636 # provided and thus we need to iterate over all of the markers
2640 2637 # to get the correct indices, but only display the ones that
2641 2638 # are relevant to --rev value
2642 2639 continue
2643 2640 fm.startitem()
2644 2641 ind = i if opts.get(b'index') else None
2645 2642 cmdutil.showmarker(fm, m, index=ind)
2646 2643 fm.end()
2647 2644
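# An illustrative marker creation (both 40-hex-digit node IDs are
# placeholders; with no arguments the command lists existing markers):
#
#   $ hg debugobsolete -d '0 0' \
#       1111111111111111111111111111111111111111 \
#       2222222222222222222222222222222222222222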
2648 2645
2649 2646 @command(
2650 2647 b'debugp1copies',
2651 2648 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2652 2649 _(b'[-r REV]'),
2653 2650 )
2654 2651 def debugp1copies(ui, repo, **opts):
2655 2652 """dump copy information compared to p1"""
2656 2653
2657 2654 opts = pycompat.byteskwargs(opts)
2658 2655 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2659 2656 for dst, src in ctx.p1copies().items():
2660 2657 ui.write(b'%s -> %s\n' % (src, dst))
2661 2658
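# Illustrative hg debugp1copies output, assuming "bar" was recorded as
# copied from "foo" relative to the first parent:
#
#   $ hg debugp1copies
#   foo -> bar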
2662 2659
2663 2660 @command(
2664 2661 b'debugp2copies',
2665 2662 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2666 2663 _(b'[-r REV]'),
2667 2664 )
2668 2665 def debugp2copies(ui, repo, **opts):
2669 2666 """dump copy information compared to p2"""
2670 2667
2671 2668 opts = pycompat.byteskwargs(opts)
2672 2669 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2673 2670 for dst, src in ctx.p2copies().items():
2674 2671 ui.write(b'%s -> %s\n' % (src, dst))
2675 2672
2676 2673
2677 2674 @command(
2678 2675 b'debugpathcomplete',
2679 2676 [
2680 2677 (b'f', b'full', None, _(b'complete an entire path')),
2681 2678 (b'n', b'normal', None, _(b'show only normal files')),
2682 2679 (b'a', b'added', None, _(b'show only added files')),
2683 2680 (b'r', b'removed', None, _(b'show only removed files')),
2684 2681 ],
2685 2682 _(b'FILESPEC...'),
2686 2683 )
2687 2684 def debugpathcomplete(ui, repo, *specs, **opts):
2688 2685 """complete part or all of a tracked path
2689 2686
2690 2687 This command supports shells that offer path name completion. It
2691 2688 currently completes only files already known to the dirstate.
2692 2689
2693 2690 Completion extends only to the next path segment unless
2694 2691 --full is specified, in which case entire paths are used."""
2695 2692
2696 2693 def complete(path, acceptable):
2697 2694 dirstate = repo.dirstate
2698 2695 spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
2699 2696 rootdir = repo.root + pycompat.ossep
2700 2697 if spec != repo.root and not spec.startswith(rootdir):
2701 2698 return [], []
2702 2699 if os.path.isdir(spec):
2703 2700 spec += b'/'
2704 2701 spec = spec[len(rootdir) :]
2705 2702 fixpaths = pycompat.ossep != b'/'
2706 2703 if fixpaths:
2707 2704 spec = spec.replace(pycompat.ossep, b'/')
2708 2705 speclen = len(spec)
2709 2706 fullpaths = opts['full']
2710 2707 files, dirs = set(), set()
2711 2708 adddir, addfile = dirs.add, files.add
2712 2709 for f, st in pycompat.iteritems(dirstate):
2713 2710 if f.startswith(spec) and st.state in acceptable:
2714 2711 if fixpaths:
2715 2712 f = f.replace(b'/', pycompat.ossep)
2716 2713 if fullpaths:
2717 2714 addfile(f)
2718 2715 continue
2719 2716 s = f.find(pycompat.ossep, speclen)
2720 2717 if s >= 0:
2721 2718 adddir(f[:s])
2722 2719 else:
2723 2720 addfile(f)
2724 2721 return files, dirs
2725 2722
2726 2723 acceptable = b''
2727 2724 if opts['normal']:
2728 2725 acceptable += b'nm'
2729 2726 if opts['added']:
2730 2727 acceptable += b'a'
2731 2728 if opts['removed']:
2732 2729 acceptable += b'r'
2733 2730 cwd = repo.getcwd()
2734 2731 if not specs:
2735 2732 specs = [b'.']
2736 2733
2737 2734 files, dirs = set(), set()
2738 2735 for spec in specs:
2739 2736 f, d = complete(spec, acceptable or b'nmar')
2740 2737 files.update(f)
2741 2738 dirs.update(d)
2742 2739 files.update(dirs)
2743 2740 ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
2744 2741 ui.write(b'\n')
2745 2742
2746 2743
2747 2744 @command(
2748 2745 b'debugpathcopies',
2749 2746 cmdutil.walkopts,
2750 2747 b'hg debugpathcopies REV1 REV2 [FILE]',
2751 2748 inferrepo=True,
2752 2749 )
2753 2750 def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
2754 2751 """show copies between two revisions"""
2755 2752 ctx1 = scmutil.revsingle(repo, rev1)
2756 2753 ctx2 = scmutil.revsingle(repo, rev2)
2757 2754 m = scmutil.match(ctx1, pats, opts)
2758 2755 for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
2759 2756 ui.write(b'%s -> %s\n' % (src, dst))
2760 2757
2761 2758
2762 2759 @command(b'debugpeer', [], _(b'PATH'), norepo=True)
2763 2760 def debugpeer(ui, path):
2764 2761 """establish a connection to a peer repository"""
2765 2762 # Always enable peer request logging. Requires --debug to display
2766 2763 # though.
2767 2764 overrides = {
2768 2765 (b'devel', b'debug.peer-request'): True,
2769 2766 }
2770 2767
2771 2768 with ui.configoverride(overrides):
2772 2769 peer = hg.peer(ui, {}, path)
2773 2770
2774 2771 try:
2775 2772 local = peer.local() is not None
2776 2773 canpush = peer.canpush()
2777 2774
2778 2775 ui.write(_(b'url: %s\n') % peer.url())
2779 2776 ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
2780 2777 ui.write(
2781 2778 _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
2782 2779 )
2783 2780 finally:
2784 2781 peer.close()
2785 2782
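# Illustrative hg debugpeer output for a remote peer (the URL is a
# placeholder):
#
#   $ hg debugpeer ssh://user@example.com/repo
#   url: ssh://user@example.com/repo
#   local: no
#   pushable: yes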
2786 2783
2787 2784 @command(
2788 2785 b'debugpickmergetool',
2789 2786 [
2790 2787 (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
2791 2788 (b'', b'changedelete', None, _(b'emulate merging change and delete')),
2792 2789 ]
2793 2790 + cmdutil.walkopts
2794 2791 + cmdutil.mergetoolopts,
2795 2792 _(b'[PATTERN]...'),
2796 2793 inferrepo=True,
2797 2794 )
2798 2795 def debugpickmergetool(ui, repo, *pats, **opts):
2799 2796 """examine which merge tool is chosen for specified file
2800 2797
2801 2798 As described in :hg:`help merge-tools`, Mercurial examines
2802 2799 the configurations below in this order to decide which merge tool is
2803 2800 chosen for the specified file.
2804 2801
2805 2802 1. ``--tool`` option
2806 2803 2. ``HGMERGE`` environment variable
2807 2804 3. configurations in ``merge-patterns`` section
2808 2805 4. configuration of ``ui.merge``
2809 2806 5. configurations in ``merge-tools`` section
2810 2807 6. ``hgmerge`` tool (for historical reasons only)
2811 2808 7. default tool for fallback (``:merge`` or ``:prompt``)
2812 2809
2813 2810 This command writes out the examination result in the style below::
2814 2811
2815 2812 FILE = MERGETOOL
2816 2813
2817 2814 By default, all files known in the first parent context of the
2818 2815 working directory are examined. Use file patterns and/or -I/-X
2819 2816 options to limit target files. -r/--rev is also useful to examine
2820 2817 files in another context without actually updating to it.
2821 2818
2822 2819 With --debug, this command also shows the warning messages emitted
2823 2820 while matching against ``merge-patterns`` and so on. It is recommended
2824 2821 to use this option with explicit file patterns and/or -I/-X options,
2825 2822 because this option increases the amount of output per file according
2826 2823 to the configurations in hgrc.
2827 2824
2828 2825 With -v/--verbose, this command first shows the configurations
2829 2826 below (only if specified).
2830 2827
2831 2828 - ``--tool`` option
2832 2829 - ``HGMERGE`` environment variable
2833 2830 - configuration of ``ui.merge``
2834 2831
2835 2832 If a merge tool is chosen before matching against
2836 2833 ``merge-patterns``, this command can't show any helpful
2837 2834 information, even with --debug. In such a case, the information
2838 2835 above is useful for knowing why that merge tool was chosen.
2839 2836 """
2840 2837 opts = pycompat.byteskwargs(opts)
2841 2838 overrides = {}
2842 2839 if opts[b'tool']:
2843 2840 overrides[(b'ui', b'forcemerge')] = opts[b'tool']
2844 2841 ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))
2845 2842
2846 2843 with ui.configoverride(overrides, b'debugmergepatterns'):
2847 2844 hgmerge = encoding.environ.get(b"HGMERGE")
2848 2845 if hgmerge is not None:
2849 2846 ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
2850 2847 uimerge = ui.config(b"ui", b"merge")
2851 2848 if uimerge:
2852 2849 ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))
2853 2850
2854 2851 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2855 2852 m = scmutil.match(ctx, pats, opts)
2856 2853 changedelete = opts[b'changedelete']
2857 2854 for path in ctx.walk(m):
2858 2855 fctx = ctx[path]
2859 2856 with ui.silent(
2860 2857 error=True
2861 2858 ) if not ui.debugflag else util.nullcontextmanager():
2862 2859 tool, toolpath = filemerge._picktool(
2863 2860 repo,
2864 2861 ui,
2865 2862 path,
2866 2863 fctx.isbinary(),
2867 2864 b'l' in fctx.flags(),
2868 2865 changedelete,
2869 2866 )
2870 2867 ui.write(b'%s = %s\n' % (path, tool))
2871 2868
2872 2869
2873 2870 @command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
2874 2871 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
2875 2872 """access the pushkey key/value protocol
2876 2873
2877 2874 With two args, list the keys in the given namespace.
2878 2875
2879 2876 With five args, set a key to new if it currently is set to old.
2880 2877 Reports success or failure.
2881 2878 """
2882 2879
2883 2880 target = hg.peer(ui, {}, repopath)
2884 2881 try:
2885 2882 if keyinfo:
2886 2883 key, old, new = keyinfo
2887 2884 with target.commandexecutor() as e:
2888 2885 r = e.callcommand(
2889 2886 b'pushkey',
2890 2887 {
2891 2888 b'namespace': namespace,
2892 2889 b'key': key,
2893 2890 b'old': old,
2894 2891 b'new': new,
2895 2892 },
2896 2893 ).result()
2897 2894
2898 2895 ui.status(pycompat.bytestr(r) + b'\n')
2899 2896 return not r
2900 2897 else:
2901 2898 for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
2902 2899 ui.write(
2903 2900 b"%s\t%s\n"
2904 2901 % (stringutil.escapestr(k), stringutil.escapestr(v))
2905 2902 )
2906 2903 finally:
2907 2904 target.close()
2908 2905
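# An illustrative listing of the "bookmarks" pushkey namespace (bookmark
# name and node are made up; keys and values are tab-separated):
#
#   $ hg debugpushkey http://example.com/repo bookmarks
#   feature-x	1234567890123456789012345678901234567890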
2909 2906
2910 2907 @command(b'debugpvec', [], _(b'A B'))
2911 2908 def debugpvec(ui, repo, a, b=None):
2912 2909 ca = scmutil.revsingle(repo, a)
2913 2910 cb = scmutil.revsingle(repo, b)
2914 2911 pa = pvec.ctxpvec(ca)
2915 2912 pb = pvec.ctxpvec(cb)
2916 2913 if pa == pb:
2917 2914 rel = b"="
2918 2915 elif pa > pb:
2919 2916 rel = b">"
2920 2917 elif pa < pb:
2921 2918 rel = b"<"
2922 2919 elif pa | pb:
2923 2920 rel = b"|"
2924 2921 ui.write(_(b"a: %s\n") % pa)
2925 2922 ui.write(_(b"b: %s\n") % pb)
2926 2923 ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
2927 2924 ui.write(
2928 2925 _(b"delta: %d hdist: %d distance: %d relation: %s\n")
2929 2926 % (
2930 2927 abs(pa._depth - pb._depth),
2931 2928 pvec._hamming(pa._vec, pb._vec),
2932 2929 pa.distance(pb),
2933 2930 rel,
2934 2931 )
2935 2932 )
2936 2933
2937 2934
2938 2935 @command(
2939 2936 b'debugrebuilddirstate|debugrebuildstate',
2940 2937 [
2941 2938 (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
2942 2939 (
2943 2940 b'',
2944 2941 b'minimal',
2945 2942 None,
2946 2943 _(
2947 2944 b'only rebuild files that are inconsistent with '
2948 2945 b'the working copy parent'
2949 2946 ),
2950 2947 ),
2951 2948 ],
2952 2949 _(b'[-r REV]'),
2953 2950 )
2954 2951 def debugrebuilddirstate(ui, repo, rev, **opts):
2955 2952 """rebuild the dirstate as it would look like for the given revision
2956 2953
2957 2954 If no revision is specified the first current parent will be used.
2958 2955
2959 2956 The dirstate will be set to the files of the given revision.
2960 2957 The actual working directory content or existing dirstate
2961 2958 information such as adds or removes is not considered.
2962 2959
2963 2960 ``minimal`` will only rebuild the dirstate status for files that claim to be
2964 2961 tracked but are not in the parent manifest, or that exist in the parent
2965 2962 manifest but are not in the dirstate. It will not change adds, removes, or
2966 2963 modified files that are in the working copy parent.
2967 2964
2968 2965 One use of this command is to make the next :hg:`status` invocation
2969 2966 check the actual file content.
2970 2967 """
2971 2968 ctx = scmutil.revsingle(repo, rev)
2972 2969 with repo.wlock():
2973 2970 dirstate = repo.dirstate
2974 2971 changedfiles = None
2975 2972 # See command doc for what minimal does.
2976 2973 if opts.get('minimal'):
2977 2974 manifestfiles = set(ctx.manifest().keys())
2978 2975 dirstatefiles = set(dirstate)
2979 2976 manifestonly = manifestfiles - dirstatefiles
2980 2977 dsonly = dirstatefiles - manifestfiles
2981 2978 dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
2982 2979 changedfiles = manifestonly | dsnotadded
2983 2980
2984 2981 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2985 2982
2986 2983
2987 2984 @command(
2988 2985 b'debugrebuildfncache',
2989 2986 [
2990 2987 (
2991 2988 b'',
2992 2989 b'only-data',
2993 2990 False,
2994 2991 _(b'only look for wrong .d files (much faster)'),
2995 2992 )
2996 2993 ],
2997 2994 b'',
2998 2995 )
2999 2996 def debugrebuildfncache(ui, repo, **opts):
3000 2997 """rebuild the fncache file"""
3001 2998 opts = pycompat.byteskwargs(opts)
3002 2999 repair.rebuildfncache(ui, repo, opts.get(b"only_data"))
3003 3000
3004 3001
3005 3002 @command(
3006 3003 b'debugrename',
3007 3004 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
3008 3005 _(b'[-r REV] [FILE]...'),
3009 3006 )
3010 3007 def debugrename(ui, repo, *pats, **opts):
3011 3008 """dump rename information"""
3012 3009
3013 3010 opts = pycompat.byteskwargs(opts)
3014 3011 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
3015 3012 m = scmutil.match(ctx, pats, opts)
3016 3013 for abs in ctx.walk(m):
3017 3014 fctx = ctx[abs]
3018 3015 o = fctx.filelog().renamed(fctx.filenode())
3019 3016 rel = repo.pathto(abs)
3020 3017 if o:
3021 3018 ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
3022 3019 else:
3023 3020 ui.write(_(b"%s not renamed\n") % rel)
3024 3021
3025 3022
3026 3023 @command(b'debugrequires|debugrequirements', [], b'')
3027 3024 def debugrequirements(ui, repo):
3028 3025 """print the current repo requirements"""
3029 3026 for r in sorted(repo.requirements):
3030 3027 ui.write(b"%s\n" % r)
3031 3028
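# Illustrative hg debugrequires output (the actual entries depend on how
# the repository was created):
#
#   $ hg debugrequires
#   dotencode
#   fncache
#   generaldelta
#   revlogv1
#   store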
3032 3029
3033 3030 @command(
3034 3031 b'debugrevlog',
3035 3032 cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
3036 3033 _(b'-c|-m|FILE'),
3037 3034 optionalrepo=True,
3038 3035 )
3039 3036 def debugrevlog(ui, repo, file_=None, **opts):
3040 3037 """show data and statistics about a revlog"""
3041 3038 opts = pycompat.byteskwargs(opts)
3042 3039 r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
3043 3040
3044 3041 if opts.get(b"dump"):
3045 3042 numrevs = len(r)
3046 3043 ui.write(
3047 3044 (
3048 3045 b"# rev p1rev p2rev start end deltastart base p1 p2"
3049 3046 b" rawsize totalsize compression heads chainlen\n"
3050 3047 )
3051 3048 )
3052 3049 ts = 0
3053 3050 heads = set()
3054 3051
3055 3052 for rev in pycompat.xrange(numrevs):
3056 3053 dbase = r.deltaparent(rev)
3057 3054 if dbase == -1:
3058 3055 dbase = rev
3059 3056 cbase = r.chainbase(rev)
3060 3057 clen = r.chainlen(rev)
3061 3058 p1, p2 = r.parentrevs(rev)
3062 3059 rs = r.rawsize(rev)
3063 3060 ts = ts + rs
3064 3061 heads -= set(r.parentrevs(rev))
3065 3062 heads.add(rev)
3066 3063 try:
3067 3064 compression = ts / r.end(rev)
3068 3065 except ZeroDivisionError:
3069 3066 compression = 0
3070 3067 ui.write(
3071 3068 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
3072 3069 b"%11d %5d %8d\n"
3073 3070 % (
3074 3071 rev,
3075 3072 p1,
3076 3073 p2,
3077 3074 r.start(rev),
3078 3075 r.end(rev),
3079 3076 r.start(dbase),
3080 3077 r.start(cbase),
3081 3078 r.start(p1),
3082 3079 r.start(p2),
3083 3080 rs,
3084 3081 ts,
3085 3082 compression,
3086 3083 len(heads),
3087 3084 clen,
3088 3085 )
3089 3086 )
3090 3087 return 0
3091 3088
3092 3089 format = r._format_version
3093 3090 v = r._format_flags
3094 3091 flags = []
3095 3092 gdelta = False
3096 3093 if v & revlog.FLAG_INLINE_DATA:
3097 3094 flags.append(b'inline')
3098 3095 if v & revlog.FLAG_GENERALDELTA:
3099 3096 gdelta = True
3100 3097 flags.append(b'generaldelta')
3101 3098 if not flags:
3102 3099 flags = [b'(none)']
3103 3100
3104 3101 ### tracks merge vs single parent
3105 3102 nummerges = 0
3106 3103
3107 3104 ### tracks the ways the deltas are built
3108 3105 # nodelta
3109 3106 numempty = 0
3110 3107 numemptytext = 0
3111 3108 numemptydelta = 0
3112 3109 # full file content
3113 3110 numfull = 0
3114 3111 # intermediate snapshot against a prior snapshot
3115 3112 numsemi = 0
3116 3113 # snapshot count per depth
3117 3114 numsnapdepth = collections.defaultdict(lambda: 0)
3118 3115 # delta against previous revision
3119 3116 numprev = 0
3120 3117 # delta against first or second parent (not prev)
3121 3118 nump1 = 0
3122 3119 nump2 = 0
3123 3120 # delta against neither prev nor parents
3124 3121 numother = 0
3125 3122 # delta against prev that are also first or second parent
3126 3123 # (details of `numprev`)
3127 3124 nump1prev = 0
3128 3125 nump2prev = 0
3129 3126
3130 3127 # data about delta chain of each revs
3131 3128 chainlengths = []
3132 3129 chainbases = []
3133 3130 chainspans = []
3134 3131
3135 3132 # data about each revision
3136 3133 datasize = [None, 0, 0]
3137 3134 fullsize = [None, 0, 0]
3138 3135 semisize = [None, 0, 0]
3139 3136 # snapshot count per depth
3140 3137 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
3141 3138 deltasize = [None, 0, 0]
3142 3139 chunktypecounts = {}
3143 3140 chunktypesizes = {}
3144 3141
3145 3142 def addsize(size, l):
3146 3143 if l[0] is None or size < l[0]:
3147 3144 l[0] = size
3148 3145 if size > l[1]:
3149 3146 l[1] = size
3150 3147 l[2] += size
3151 3148
3152 3149 numrevs = len(r)
3153 3150 for rev in pycompat.xrange(numrevs):
3154 3151 p1, p2 = r.parentrevs(rev)
3155 3152 delta = r.deltaparent(rev)
3156 3153 if format > 0:
3157 3154 addsize(r.rawsize(rev), datasize)
3158 3155 if p2 != nullrev:
3159 3156 nummerges += 1
3160 3157 size = r.length(rev)
3161 3158 if delta == nullrev:
3162 3159 chainlengths.append(0)
3163 3160 chainbases.append(r.start(rev))
3164 3161 chainspans.append(size)
3165 3162 if size == 0:
3166 3163 numempty += 1
3167 3164 numemptytext += 1
3168 3165 else:
3169 3166 numfull += 1
3170 3167 numsnapdepth[0] += 1
3171 3168 addsize(size, fullsize)
3172 3169 addsize(size, snapsizedepth[0])
3173 3170 else:
3174 3171 chainlengths.append(chainlengths[delta] + 1)
3175 3172 baseaddr = chainbases[delta]
3176 3173 revaddr = r.start(rev)
3177 3174 chainbases.append(baseaddr)
3178 3175 chainspans.append((revaddr - baseaddr) + size)
3179 3176 if size == 0:
3180 3177 numempty += 1
3181 3178 numemptydelta += 1
3182 3179 elif r.issnapshot(rev):
3183 3180 addsize(size, semisize)
3184 3181 numsemi += 1
3185 3182 depth = r.snapshotdepth(rev)
3186 3183 numsnapdepth[depth] += 1
3187 3184 addsize(size, snapsizedepth[depth])
3188 3185 else:
3189 3186 addsize(size, deltasize)
3190 3187 if delta == rev - 1:
3191 3188 numprev += 1
3192 3189 if delta == p1:
3193 3190 nump1prev += 1
3194 3191 elif delta == p2:
3195 3192 nump2prev += 1
3196 3193 elif delta == p1:
3197 3194 nump1 += 1
3198 3195 elif delta == p2:
3199 3196 nump2 += 1
3200 3197 elif delta != nullrev:
3201 3198 numother += 1
3202 3199
3203 3200 # Obtain data on the raw chunks in the revlog.
3204 3201 if util.safehasattr(r, b'_getsegmentforrevs'):
3205 3202 segment = r._getsegmentforrevs(rev, rev)[1]
3206 3203 else:
3207 3204 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
3208 3205 if segment:
3209 3206 chunktype = bytes(segment[0:1])
3210 3207 else:
3211 3208 chunktype = b'empty'
3212 3209
3213 3210 if chunktype not in chunktypecounts:
3214 3211 chunktypecounts[chunktype] = 0
3215 3212 chunktypesizes[chunktype] = 0
3216 3213
3217 3214 chunktypecounts[chunktype] += 1
3218 3215 chunktypesizes[chunktype] += size
3219 3216
3220 3217 # Adjust size min value for empty cases
3221 3218 for size in (datasize, fullsize, semisize, deltasize):
3222 3219 if size[0] is None:
3223 3220 size[0] = 0
3224 3221
3225 3222 numdeltas = numrevs - numfull - numempty - numsemi
3226 3223 numoprev = numprev - nump1prev - nump2prev
3227 3224 totalrawsize = datasize[2]
3228 3225 datasize[2] /= numrevs
3229 3226 fulltotal = fullsize[2]
3230 3227 if numfull == 0:
3231 3228 fullsize[2] = 0
3232 3229 else:
3233 3230 fullsize[2] /= numfull
3234 3231 semitotal = semisize[2]
3235 3232 snaptotal = {}
3236 3233 if numsemi > 0:
3237 3234 semisize[2] /= numsemi
3238 3235 for depth in snapsizedepth:
3239 3236 snaptotal[depth] = snapsizedepth[depth][2]
3240 3237 snapsizedepth[depth][2] /= numsnapdepth[depth]
3241 3238
3242 3239 deltatotal = deltasize[2]
3243 3240 if numdeltas > 0:
3244 3241 deltasize[2] /= numdeltas
3245 3242 totalsize = fulltotal + semitotal + deltatotal
3246 3243 avgchainlen = sum(chainlengths) / numrevs
3247 3244 maxchainlen = max(chainlengths)
3248 3245 maxchainspan = max(chainspans)
3249 3246 compratio = 1
3250 3247 if totalsize:
3251 3248 compratio = totalrawsize / totalsize
3252 3249
3253 3250 basedfmtstr = b'%%%dd\n'
3254 3251 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
3255 3252
3256 3253 def dfmtstr(max):
3257 3254 return basedfmtstr % len(str(max))
3258 3255
3259 3256 def pcfmtstr(max, padding=0):
3260 3257 return basepcfmtstr % (len(str(max)), b' ' * padding)
3261 3258
3262 3259 def pcfmt(value, total):
3263 3260 if total:
3264 3261 return (value, 100 * float(value) / total)
3265 3262 else:
3266 3263 return value, 100.0
3267 3264
3268 3265 ui.writenoi18n(b'format : %d\n' % format)
3269 3266 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
3270 3267
3271 3268 ui.write(b'\n')
3272 3269 fmt = pcfmtstr(totalsize)
3273 3270 fmt2 = dfmtstr(totalsize)
3274 3271 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3275 3272 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
3276 3273 ui.writenoi18n(
3277 3274 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
3278 3275 )
3279 3276 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3280 3277 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
3281 3278 ui.writenoi18n(
3282 3279 b' text : '
3283 3280 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
3284 3281 )
3285 3282 ui.writenoi18n(
3286 3283 b' delta : '
3287 3284 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
3288 3285 )
3289 3286 ui.writenoi18n(
3290 3287 b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
3291 3288 )
3292 3289 for depth in sorted(numsnapdepth):
3293 3290 ui.write(
3294 3291 (b' lvl-%-3d : ' % depth)
3295 3292 + fmt % pcfmt(numsnapdepth[depth], numrevs)
3296 3293 )
3297 3294 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
3298 3295 ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
3299 3296 ui.writenoi18n(
3300 3297 b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
3301 3298 )
3302 3299 for depth in sorted(numsnapdepth):
3303 3300 ui.write(
3304 3301 (b' lvl-%-3d : ' % depth)
3305 3302 + fmt % pcfmt(snaptotal[depth], totalsize)
3306 3303 )
3307 3304 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
3308 3305
3309 3306 def fmtchunktype(chunktype):
3310 3307 if chunktype == b'empty':
3311 3308 return b' %s : ' % chunktype
3312 3309 elif chunktype in pycompat.bytestr(string.ascii_letters):
3313 3310 return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
3314 3311 else:
3315 3312 return b' 0x%s : ' % hex(chunktype)
3316 3313
3317 3314 ui.write(b'\n')
3318 3315 ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
3319 3316 for chunktype in sorted(chunktypecounts):
3320 3317 ui.write(fmtchunktype(chunktype))
3321 3318 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
3322 3319 ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
3323 3320 for chunktype in sorted(chunktypecounts):
3324 3321 ui.write(fmtchunktype(chunktype))
3325 3322 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
3326 3323
3327 3324 ui.write(b'\n')
3328 3325 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
3329 3326 ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
3330 3327 ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
3331 3328 ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
3332 3329 ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
3333 3330
3334 3331 if format > 0:
3335 3332 ui.write(b'\n')
3336 3333 ui.writenoi18n(
3337 3334 b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
3338 3335 % tuple(datasize)
3339 3336 )
3340 3337 ui.writenoi18n(
3341 3338 b'full revision size (min/max/avg) : %d / %d / %d\n'
3342 3339 % tuple(fullsize)
3343 3340 )
3344 3341 ui.writenoi18n(
3345 3342 b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
3346 3343 % tuple(semisize)
3347 3344 )
3348 3345 for depth in sorted(snapsizedepth):
3349 3346 if depth == 0:
3350 3347 continue
3351 3348 ui.writenoi18n(
3352 3349 b' level-%-3d (min/max/avg) : %d / %d / %d\n'
3353 3350 % ((depth,) + tuple(snapsizedepth[depth]))
3354 3351 )
3355 3352 ui.writenoi18n(
3356 3353 b'delta size (min/max/avg) : %d / %d / %d\n'
3357 3354 % tuple(deltasize)
3358 3355 )
3359 3356
3360 3357 if numdeltas > 0:
3361 3358 ui.write(b'\n')
3362 3359 fmt = pcfmtstr(numdeltas)
3363 3360 fmt2 = pcfmtstr(numdeltas, 4)
3364 3361 ui.writenoi18n(
3365 3362 b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
3366 3363 )
3367 3364 if numprev > 0:
3368 3365 ui.writenoi18n(
3369 3366 b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
3370 3367 )
3371 3368 ui.writenoi18n(
3372 3369 b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
3373 3370 )
3374 3371 ui.writenoi18n(
3375 3372 b' other : ' + fmt2 % pcfmt(numoprev, numprev)
3376 3373 )
3377 3374 if gdelta:
3378 3375 ui.writenoi18n(
3379 3376 b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
3380 3377 )
3381 3378 ui.writenoi18n(
3382 3379 b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
3383 3380 )
3384 3381 ui.writenoi18n(
3385 3382 b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
3386 3383 )
3387 3384
3388 3385
3389 3386 @command(
3390 3387 b'debugrevlogindex',
3391 3388 cmdutil.debugrevlogopts
3392 3389 + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
3393 3390 _(b'[-f FORMAT] -c|-m|FILE'),
3394 3391 optionalrepo=True,
3395 3392 )
3396 3393 def debugrevlogindex(ui, repo, file_=None, **opts):
3397 3394 """dump the contents of a revlog index"""
3398 3395 opts = pycompat.byteskwargs(opts)
3399 3396 r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
3400 3397 format = opts.get(b'format', 0)
3401 3398 if format not in (0, 1):
3402 3399 raise error.Abort(_(b"unknown format %d") % format)
3403 3400
3404 3401 if ui.debugflag:
3405 3402 shortfn = hex
3406 3403 else:
3407 3404 shortfn = short
3408 3405
3409 3406 # There might not be anything in r, so have a sane default
3410 3407 idlen = 12
3411 3408 for i in r:
3412 3409 idlen = len(shortfn(r.node(i)))
3413 3410 break
3414 3411
3415 3412 if format == 0:
3416 3413 if ui.verbose:
3417 3414 ui.writenoi18n(
3418 3415 b" rev offset length linkrev %s %s p2\n"
3419 3416 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3420 3417 )
3421 3418 else:
3422 3419 ui.writenoi18n(
3423 3420 b" rev linkrev %s %s p2\n"
3424 3421 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3425 3422 )
3426 3423 elif format == 1:
3427 3424 if ui.verbose:
3428 3425 ui.writenoi18n(
3429 3426 (
3430 3427 b" rev flag offset length size link p1"
3431 3428 b" p2 %s\n"
3432 3429 )
3433 3430 % b"nodeid".rjust(idlen)
3434 3431 )
3435 3432 else:
3436 3433 ui.writenoi18n(
3437 3434 b" rev flag size link p1 p2 %s\n"
3438 3435 % b"nodeid".rjust(idlen)
3439 3436 )
3440 3437
3441 3438 for i in r:
3442 3439 node = r.node(i)
3443 3440 if format == 0:
3444 3441 try:
3445 3442 pp = r.parents(node)
3446 3443 except Exception:
3447 3444 pp = [repo.nullid, repo.nullid]
3448 3445 if ui.verbose:
3449 3446 ui.write(
3450 3447 b"% 6d % 9d % 7d % 7d %s %s %s\n"
3451 3448 % (
3452 3449 i,
3453 3450 r.start(i),
3454 3451 r.length(i),
3455 3452 r.linkrev(i),
3456 3453 shortfn(node),
3457 3454 shortfn(pp[0]),
3458 3455 shortfn(pp[1]),
3459 3456 )
3460 3457 )
3461 3458 else:
3462 3459 ui.write(
3463 3460 b"% 6d % 7d %s %s %s\n"
3464 3461 % (
3465 3462 i,
3466 3463 r.linkrev(i),
3467 3464 shortfn(node),
3468 3465 shortfn(pp[0]),
3469 3466 shortfn(pp[1]),
3470 3467 )
3471 3468 )
3472 3469 elif format == 1:
3473 3470 pr = r.parentrevs(i)
3474 3471 if ui.verbose:
3475 3472 ui.write(
3476 3473 b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
3477 3474 % (
3478 3475 i,
3479 3476 r.flags(i),
3480 3477 r.start(i),
3481 3478 r.length(i),
3482 3479 r.rawsize(i),
3483 3480 r.linkrev(i),
3484 3481 pr[0],
3485 3482 pr[1],
3486 3483 shortfn(node),
3487 3484 )
3488 3485 )
3489 3486 else:
3490 3487 ui.write(
3491 3488 b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
3492 3489 % (
3493 3490 i,
3494 3491 r.flags(i),
3495 3492 r.rawsize(i),
3496 3493 r.linkrev(i),
3497 3494 pr[0],
3498 3495 pr[1],
3499 3496 shortfn(node),
3500 3497 )
3501 3498 )
3502 3499
3503 3500
3504 3501 @command(
3505 3502 b'debugrevspec',
3506 3503 [
3507 3504 (
3508 3505 b'',
3509 3506 b'optimize',
3510 3507 None,
3511 3508 _(b'print parsed tree after optimizing (DEPRECATED)'),
3512 3509 ),
3513 3510 (
3514 3511 b'',
3515 3512 b'show-revs',
3516 3513 True,
3517 3514 _(b'print list of result revisions (default)'),
3518 3515 ),
3519 3516 (
3520 3517 b's',
3521 3518 b'show-set',
3522 3519 None,
3523 3520 _(b'print internal representation of result set'),
3524 3521 ),
3525 3522 (
3526 3523 b'p',
3527 3524 b'show-stage',
3528 3525 [],
3529 3526 _(b'print parsed tree at the given stage'),
3530 3527 _(b'NAME'),
3531 3528 ),
3532 3529 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3533 3530 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3534 3531 ],
3535 3532 b'REVSPEC',
3536 3533 )
3537 3534 def debugrevspec(ui, repo, expr, **opts):
3538 3535 """parse and apply a revision specification
3539 3536
3540 3537 Use -p/--show-stage option to print the parsed tree at the given stages.
3541 3538 Use -p all to print tree at every stage.
3542 3539
3543 3540 Use --no-show-revs option with -s or -p to print only the set
3544 3541 representation or the parsed tree respectively.
3545 3542
3546 3543 Use --verify-optimized to compare the optimized result with the unoptimized
3547 3544 one. Returns 1 if the optimized result differs.
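
    For example (an illustrative invocation)::

      hg debugrevspec -p all '(::. and not merge())'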
3548 3545 """
3549 3546 opts = pycompat.byteskwargs(opts)
3550 3547 aliases = ui.configitems(b'revsetalias')
3551 3548 stages = [
3552 3549 (b'parsed', lambda tree: tree),
3553 3550 (
3554 3551 b'expanded',
3555 3552 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3556 3553 ),
3557 3554 (b'concatenated', revsetlang.foldconcat),
3558 3555 (b'analyzed', revsetlang.analyze),
3559 3556 (b'optimized', revsetlang.optimize),
3560 3557 ]
3561 3558 if opts[b'no_optimized']:
3562 3559 stages = stages[:-1]
3563 3560 if opts[b'verify_optimized'] and opts[b'no_optimized']:
3564 3561 raise error.Abort(
3565 3562 _(b'cannot use --verify-optimized with --no-optimized')
3566 3563 )
3567 3564 stagenames = {n for n, f in stages}
3568 3565
3569 3566 showalways = set()
3570 3567 showchanged = set()
3571 3568 if ui.verbose and not opts[b'show_stage']:
3572 3569 # show parsed tree by --verbose (deprecated)
3573 3570 showalways.add(b'parsed')
3574 3571 showchanged.update([b'expanded', b'concatenated'])
3575 3572 if opts[b'optimize']:
3576 3573 showalways.add(b'optimized')
3577 3574 if opts[b'show_stage'] and opts[b'optimize']:
3578 3575 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3579 3576 if opts[b'show_stage'] == [b'all']:
3580 3577 showalways.update(stagenames)
3581 3578 else:
3582 3579 for n in opts[b'show_stage']:
3583 3580 if n not in stagenames:
3584 3581 raise error.Abort(_(b'invalid stage name: %s') % n)
3585 3582 showalways.update(opts[b'show_stage'])
3586 3583
3587 3584 treebystage = {}
3588 3585 printedtree = None
3589 3586 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3590 3587 for n, f in stages:
3591 3588 treebystage[n] = tree = f(tree)
3592 3589 if n in showalways or (n in showchanged and tree != printedtree):
3593 3590 if opts[b'show_stage'] or n != b'parsed':
3594 3591 ui.write(b"* %s:\n" % n)
3595 3592 ui.write(revsetlang.prettyformat(tree), b"\n")
3596 3593 printedtree = tree
3597 3594
3598 3595 if opts[b'verify_optimized']:
3599 3596 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3600 3597 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3601 3598 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3602 3599 ui.writenoi18n(
3603 3600 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3604 3601 )
3605 3602 ui.writenoi18n(
3606 3603 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3607 3604 )
3608 3605 arevs = list(arevs)
3609 3606 brevs = list(brevs)
3610 3607 if arevs == brevs:
3611 3608 return 0
3612 3609 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3613 3610 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3614 3611 sm = difflib.SequenceMatcher(None, arevs, brevs)
3615 3612 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3616 3613 if tag in ('delete', 'replace'):
3617 3614 for c in arevs[alo:ahi]:
3618 3615 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3619 3616 if tag in ('insert', 'replace'):
3620 3617 for c in brevs[blo:bhi]:
3621 3618 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3622 3619 if tag == 'equal':
3623 3620 for c in arevs[alo:ahi]:
3624 3621 ui.write(b' %d\n' % c)
3625 3622 return 1
3626 3623
3627 3624 func = revset.makematcher(tree)
3628 3625 revs = func(repo)
3629 3626 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3630 3627 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3631 3628 if not opts[b'show_revs']:
3632 3629 return
3633 3630 for c in revs:
3634 3631 ui.write(b"%d\n" % c)
3635 3632
3636 3633
3637 3634 @command(
3638 3635 b'debugserve',
3639 3636 [
3640 3637 (
3641 3638 b'',
3642 3639 b'sshstdio',
3643 3640 False,
3644 3641 _(b'run an SSH server bound to process handles'),
3645 3642 ),
3646 3643 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3647 3644 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3648 3645 ],
3649 3646 b'',
3650 3647 )
3651 3648 def debugserve(ui, repo, **opts):
3652 3649 """run a server with advanced settings
3653 3650
3654 3651 This command is similar to :hg:`serve`. It exists partially as a
3655 3652 workaround to the fact that ``hg serve --stdio`` must have specific
3656 3653 arguments for security reasons.
3657 3654 """
3658 3655 opts = pycompat.byteskwargs(opts)
3659 3656
3660 3657 if not opts[b'sshstdio']:
3661 3658 raise error.Abort(_(b'only --sshstdio is currently supported'))
3662 3659
3663 3660 logfh = None
3664 3661
3665 3662 if opts[b'logiofd'] and opts[b'logiofile']:
3666 3663 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3667 3664
3668 3665 if opts[b'logiofd']:
3669 3666 # Ideally we would be line buffered. But line buffering in binary
3670 3667 # mode isn't supported and emits a warning in Python 3.8+. Disabling
3671 3668 # buffering could have performance impacts. But since this isn't
3672 3669 # performance critical code, it should be fine.
3673 3670 try:
3674 3671 logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
3675 3672 except OSError as e:
3676 3673 if e.errno != errno.ESPIPE:
3677 3674 raise
3678 3675 # can't seek a pipe, so `ab` mode fails on py3
3679 3676 logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
3680 3677 elif opts[b'logiofile']:
3681 3678 logfh = open(opts[b'logiofile'], b'ab', 0)
3682 3679
3683 3680 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3684 3681 s.serve_forever()
3685 3682
3686 3683
3687 3684 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3688 3685 def debugsetparents(ui, repo, rev1, rev2=None):
3689 3686 """manually set the parents of the current working directory (DANGEROUS)
3690 3687
3691 3688 This command is not what you are looking for and should not be used. Using
3692 3689     this command will most certainly result in slight corruption of the file
3693 3690     level histories within your repository. DO NOT USE THIS COMMAND.
3694 3691 
3695 3692     The command updates the p1 and p2 fields in the dirstate and touches
3696 3693     nothing else. This is useful for writing repository conversion tools, but
3697 3694     should be used with extreme care. For example, neither the working
3698 3695     directory nor the dirstate is updated, so file status may be incorrect
3699 3696     after running this command. Only use it if you are one of the few people
3700 3697     who deeply understand both conversion tools and file level histories. If
3701 3698     you are reading this help, you are not one of these people (most of them
3702 3699     sailed west from Mithlond anyway).
3703 3700
3704 3701 So one last time DO NOT USE THIS COMMAND.
3705 3702
3706 3703 Returns 0 on success.
3707 3704 """
3708 3705
3709 3706 node1 = scmutil.revsingle(repo, rev1).node()
3710 3707 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3711 3708
3712 3709 with repo.wlock():
3713 3710 repo.setparents(node1, node2)
3714 3711
3715 3712
3716 3713 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3717 3714 def debugsidedata(ui, repo, file_, rev=None, **opts):
3718 3715 """dump the side data for a cl/manifest/file revision
3719 3716
3720 3717 Use --verbose to dump the sidedata content."""
3721 3718 opts = pycompat.byteskwargs(opts)
3722 3719 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
3723 3720 if rev is not None:
3724 3721 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3725 3722 file_, rev = None, file_
3726 3723 elif rev is None:
3727 3724 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3728 3725 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
3729 3726 r = getattr(r, '_revlog', r)
3730 3727 try:
3731 3728 sidedata = r.sidedata(r.lookup(rev))
3732 3729 except KeyError:
3733 3730 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3734 3731 if sidedata:
3735 3732 sidedata = list(sidedata.items())
3736 3733 sidedata.sort()
3737 3734 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3738 3735 for key, value in sidedata:
3739 3736 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3740 3737 if ui.verbose:
3741 3738 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3742 3739
3743 3740
3744 3741 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3745 3742 def debugssl(ui, repo, source=None, **opts):
3746 3743 """test a secure connection to a server
3747 3744
3748 3745 This builds the certificate chain for the server on Windows, installing the
3749 3746 missing intermediates and trusted root via Windows Update if necessary. It
3750 3747 does nothing on other platforms.
3751 3748
3752 3749 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3753 3750 that server is used. See :hg:`help urls` for more information.
3754 3751
3755 3752 If the update succeeds, retry the original operation. Otherwise, the cause
3756 3753 of the SSL error is likely another issue.
3757 3754 """
3758 3755 if not pycompat.iswindows:
3759 3756 raise error.Abort(
3760 3757 _(b'certificate chain building is only possible on Windows')
3761 3758 )
3762 3759
3763 3760 if not source:
3764 3761 if not repo:
3765 3762 raise error.Abort(
3766 3763 _(
3767 3764 b"there is no Mercurial repository here, and no "
3768 3765 b"server specified"
3769 3766 )
3770 3767 )
3771 3768 source = b"default"
3772 3769
3773 3770 source, branches = urlutil.get_unique_pull_path(
3774 3771 b'debugssl', repo, ui, source
3775 3772 )
3776 3773 url = urlutil.url(source)
3777 3774
3778 3775 defaultport = {b'https': 443, b'ssh': 22}
3779 3776 if url.scheme in defaultport:
3780 3777 try:
3781 3778 addr = (url.host, int(url.port or defaultport[url.scheme]))
3782 3779 except ValueError:
3783 3780 raise error.Abort(_(b"malformed port number in URL"))
3784 3781 else:
3785 3782 raise error.Abort(_(b"only https and ssh connections are supported"))
3786 3783
3787 3784 from . import win32
3788 3785
3789 3786 s = ssl.wrap_socket(
3790 3787 socket.socket(),
3791 3788 ssl_version=ssl.PROTOCOL_TLS,
3792 3789 cert_reqs=ssl.CERT_NONE,
3793 3790 ca_certs=None,
3794 3791 )
3795 3792
3796 3793 try:
3797 3794 s.connect(addr)
3798 3795 cert = s.getpeercert(True)
3799 3796
3800 3797 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3801 3798
3802 3799 complete = win32.checkcertificatechain(cert, build=False)
3803 3800
3804 3801 if not complete:
3805 3802 ui.status(_(b'certificate chain is incomplete, updating... '))
3806 3803
3807 3804 if not win32.checkcertificatechain(cert):
3808 3805 ui.status(_(b'failed.\n'))
3809 3806 else:
3810 3807 ui.status(_(b'done.\n'))
3811 3808 else:
3812 3809 ui.status(_(b'full certificate chain is available\n'))
3813 3810 finally:
3814 3811 s.close()
3815 3812
3816 3813
3817 3814 @command(
3818 3815 b"debugbackupbundle",
3819 3816 [
3820 3817 (
3821 3818 b"",
3822 3819 b"recover",
3823 3820 b"",
3824 3821 b"brings the specified changeset back into the repository",
3825 3822 )
3826 3823 ]
3827 3824 + cmdutil.logopts,
3828 3825 _(b"hg debugbackupbundle [--recover HASH]"),
3829 3826 )
3830 3827 def debugbackupbundle(ui, repo, *pats, **opts):
3831 3828 """lists the changesets available in backup bundles
3832 3829
3833 3830 Without any arguments, this command prints a list of the changesets in each
3834 3831 backup bundle.
3835 3832
3836 3833 --recover takes a changeset hash and unbundles the first bundle that
3837 3834 contains that hash, which puts that changeset back in your repository.
3838 3835
3839 3836 --verbose will print the entire commit message and the bundle path for that
3840 3837 backup.
3841 3838 """
3842 3839 backups = list(
3843 3840 filter(
3844 3841 os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
3845 3842 )
3846 3843 )
3847 3844 backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)
3848 3845
3849 3846 opts = pycompat.byteskwargs(opts)
3850 3847 opts[b"bundle"] = b""
3851 3848 opts[b"force"] = None
3852 3849 limit = logcmdutil.getlimit(opts)
3853 3850
3854 3851 def display(other, chlist, displayer):
3855 3852 if opts.get(b"newest_first"):
3856 3853 chlist.reverse()
3857 3854 count = 0
3858 3855 for n in chlist:
3859 3856 if limit is not None and count >= limit:
3860 3857 break
3861 3858 parents = [
3862 3859 True for p in other.changelog.parents(n) if p != repo.nullid
3863 3860 ]
3864 3861 if opts.get(b"no_merges") and len(parents) == 2:
3865 3862 continue
3866 3863 count += 1
3867 3864 displayer.show(other[n])
3868 3865
3869 3866 recovernode = opts.get(b"recover")
3870 3867 if recovernode:
3871 3868 if scmutil.isrevsymbol(repo, recovernode):
3872 3869 ui.warn(_(b"%s already exists in the repo\n") % recovernode)
3873 3870 return
3874 3871 elif backups:
3875 3872 msg = _(
3876 3873 b"Recover changesets using: hg debugbackupbundle --recover "
3877 3874 b"<changeset hash>\n\nAvailable backup changesets:"
3878 3875 )
3879 3876 ui.status(msg, label=b"status.removed")
3880 3877 else:
3881 3878 ui.status(_(b"no backup changesets found\n"))
3882 3879 return
3883 3880
3884 3881 for backup in backups:
3885 3882 # Much of this is copied from the hg incoming logic
3886 3883 source = os.path.relpath(backup, encoding.getcwd())
3887 3884 source, branches = urlutil.get_unique_pull_path(
3888 3885 b'debugbackupbundle',
3889 3886 repo,
3890 3887 ui,
3891 3888 source,
3892 3889 default_branches=opts.get(b'branch'),
3893 3890 )
3894 3891 try:
3895 3892 other = hg.peer(repo, opts, source)
3896 3893 except error.LookupError as ex:
3897 3894 msg = _(b"\nwarning: unable to open bundle %s") % source
3898 3895 hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
3899 3896 ui.warn(msg, hint=hint)
3900 3897 continue
3901 3898 revs, checkout = hg.addbranchrevs(
3902 3899 repo, other, branches, opts.get(b"rev")
3903 3900 )
3904 3901
3905 3902 if revs:
3906 3903 revs = [other.lookup(rev) for rev in revs]
3907 3904
3908 3905 with ui.silent():
3909 3906 try:
3910 3907 other, chlist, cleanupfn = bundlerepo.getremotechanges(
3911 3908 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
3912 3909 )
3913 3910 except error.LookupError:
3914 3911 continue
3915 3912
3916 3913 try:
3917 3914 if not chlist:
3918 3915 continue
3919 3916 if recovernode:
3920 3917 with repo.lock(), repo.transaction(b"unbundle") as tr:
3921 3918 if scmutil.isrevsymbol(other, recovernode):
3922 3919 ui.status(_(b"Unbundling %s\n") % (recovernode))
3923 3920 f = hg.openpath(ui, source)
3924 3921 gen = exchange.readbundle(ui, f, source)
3925 3922 if isinstance(gen, bundle2.unbundle20):
3926 3923 bundle2.applybundle(
3927 3924 repo,
3928 3925 gen,
3929 3926 tr,
3930 3927 source=b"unbundle",
3931 3928 url=b"bundle:" + source,
3932 3929 )
3933 3930 else:
3934 3931 gen.apply(repo, b"unbundle", b"bundle:" + source)
3935 3932 break
3936 3933 else:
3937 3934 backupdate = encoding.strtolocal(
3938 3935 time.strftime(
3939 3936 "%a %H:%M, %Y-%m-%d",
3940 3937 time.localtime(os.path.getmtime(source)),
3941 3938 )
3942 3939 )
3943 3940 ui.status(b"\n%s\n" % (backupdate.ljust(50)))
3944 3941 if ui.verbose:
3945 3942 ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
3946 3943 else:
3947 3944 opts[
3948 3945 b"template"
3949 3946 ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
3950 3947 displayer = logcmdutil.changesetdisplayer(
3951 3948 ui, other, opts, False
3952 3949 )
3953 3950 display(other, chlist, displayer)
3954 3951 displayer.close()
3955 3952 finally:
3956 3953 cleanupfn()
3957 3954
3958 3955
3959 3956 @command(
3960 3957 b'debugsub',
3961 3958 [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
3962 3959 _(b'[-r REV] [REV]'),
3963 3960 )
3964 3961 def debugsub(ui, repo, rev=None):
3965 3962 ctx = scmutil.revsingle(repo, rev, None)
3966 3963 for k, v in sorted(ctx.substate.items()):
3967 3964 ui.writenoi18n(b'path %s\n' % k)
3968 3965 ui.writenoi18n(b' source %s\n' % v[0])
3969 3966 ui.writenoi18n(b' revision %s\n' % v[1])
3970 3967
3971 3968
3972 3969 @command(b'debugshell', optionalrepo=True)
3973 3970 def debugshell(ui, repo):
3974 3971 """run an interactive Python interpreter
3975 3972
3976 3973 The local namespace is provided with a reference to the ui and
3977 3974 the repo instance (if available).
3978 3975 """
3979 3976 import code
3980 3977
3981 3978 imported_objects = {
3982 3979 'ui': ui,
3983 3980 'repo': repo,
3984 3981 }
3985 3982
3986 3983 code.interact(local=imported_objects)
3987 3984
3988 3985
3989 3986 @command(
3990 3987 b'debugsuccessorssets',
3991 3988 [(b'', b'closest', False, _(b'return closest successors sets only'))],
3992 3989 _(b'[REV]'),
3993 3990 )
3994 3991 def debugsuccessorssets(ui, repo, *revs, **opts):
3995 3992 """show set of successors for revision
3996 3993
3997 3994 A successors set of changeset A is a consistent group of revisions that
3998 3995     succeed A. It contains only non-obsolete changesets, unless the
3999 3996     --closest option is given.
4000 3997
4001 3998 In most cases a changeset A has a single successors set containing a single
4002 3999 successor (changeset A replaced by A').
4003 4000
4004 4001     A changeset that is made obsolete with no successors is called "pruned".
4005 4002 Such changesets have no successors sets at all.
4006 4003
4007 4004 A changeset that has been "split" will have a successors set containing
4008 4005 more than one successor.
4009 4006
4010 4007 A changeset that has been rewritten in multiple different ways is called
4011 4008 "divergent". Such changesets have multiple successor sets (each of which
4012 4009 may also be split, i.e. have multiple successors).
4013 4010
4014 4011 Results are displayed as follows::
4015 4012
4016 4013 <rev1>
4017 4014 <successors-1A>
4018 4015 <rev2>
4019 4016 <successors-2A>
4020 4017 <successors-2B1> <successors-2B2> <successors-2B3>
4021 4018
4022 4019 Here rev2 has two possible (i.e. divergent) successors sets. The first
4023 4020 holds one element, whereas the second holds three (i.e. the changeset has
4024 4021 been split).
4025 4022 """
4026 4023 # passed to successorssets caching computation from one call to another
4027 4024 cache = {}
4028 4025 ctx2str = bytes
4029 4026 node2str = short
4030 4027 for rev in scmutil.revrange(repo, revs):
4031 4028 ctx = repo[rev]
4032 4029 ui.write(b'%s\n' % ctx2str(ctx))
4033 4030 for succsset in obsutil.successorssets(
4034 4031 repo, ctx.node(), closest=opts['closest'], cache=cache
4035 4032 ):
4036 4033 if succsset:
4037 4034 ui.write(b' ')
4038 4035 ui.write(node2str(succsset[0]))
4039 4036 for node in succsset[1:]:
4040 4037 ui.write(b' ')
4041 4038 ui.write(node2str(node))
4042 4039 ui.write(b'\n')
4043 4040
4044 4041
4045 4042 @command(b'debugtagscache', [])
4046 4043 def debugtagscache(ui, repo):
4047 4044 """display the contents of .hg/cache/hgtagsfnodes1"""
4048 4045 cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
4049 4046 flog = repo.file(b'.hgtags')
4050 4047 for r in repo:
4051 4048 node = repo[r].node()
4052 4049 tagsnode = cache.getfnode(node, computemissing=False)
4053 4050 if tagsnode:
4054 4051 tagsnodedisplay = hex(tagsnode)
4055 4052 if not flog.hasnode(tagsnode):
4056 4053 tagsnodedisplay += b' (unknown node)'
4057 4054 elif tagsnode is None:
4058 4055 tagsnodedisplay = b'missing'
4059 4056 else:
4060 4057 tagsnodedisplay = b'invalid'
4061 4058
4062 4059 ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))
4063 4060
4064 4061
4065 4062 @command(
4066 4063 b'debugtemplate',
4067 4064 [
4068 4065 (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
4069 4066 (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
4070 4067 ],
4071 4068 _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
4072 4069 optionalrepo=True,
4073 4070 )
4074 4071 def debugtemplate(ui, repo, tmpl, **opts):
4075 4072 """parse and apply a template
4076 4073
4077 4074 If -r/--rev is given, the template is processed as a log template and
4078 4075 applied to the given changesets. Otherwise, it is processed as a generic
4079 4076 template.
4080 4077
4081 4078 Use --verbose to print the parsed tree.
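
    For example (an illustrative invocation; ``greeting`` is an arbitrary
    user-defined keyword)::

      hg debugtemplate -r . -D greeting=hello '{greeting}: {node|short}\n'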
4082 4079 """
4083 4080 revs = None
4084 4081 if opts['rev']:
4085 4082 if repo is None:
4086 4083 raise error.RepoError(
4087 4084 _(b'there is no Mercurial repository here (.hg not found)')
4088 4085 )
4089 4086 revs = scmutil.revrange(repo, opts['rev'])
4090 4087
4091 4088 props = {}
4092 4089 for d in opts['define']:
4093 4090 try:
4094 4091 k, v = (e.strip() for e in d.split(b'=', 1))
4095 4092 if not k or k == b'ui':
4096 4093 raise ValueError
4097 4094 props[k] = v
4098 4095 except ValueError:
4099 4096 raise error.Abort(_(b'malformed keyword definition: %s') % d)
4100 4097
4101 4098 if ui.verbose:
4102 4099 aliases = ui.configitems(b'templatealias')
4103 4100 tree = templater.parse(tmpl)
4104 4101 ui.note(templater.prettyformat(tree), b'\n')
4105 4102 newtree = templater.expandaliases(tree, aliases)
4106 4103 if newtree != tree:
4107 4104 ui.notenoi18n(
4108 4105 b"* expanded:\n", templater.prettyformat(newtree), b'\n'
4109 4106 )
4110 4107
4111 4108 if revs is None:
4112 4109 tres = formatter.templateresources(ui, repo)
4113 4110 t = formatter.maketemplater(ui, tmpl, resources=tres)
4114 4111 if ui.verbose:
4115 4112 kwds, funcs = t.symbolsuseddefault()
4116 4113 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
4117 4114 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
4118 4115 ui.write(t.renderdefault(props))
4119 4116 else:
4120 4117 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
4121 4118 if ui.verbose:
4122 4119 kwds, funcs = displayer.t.symbolsuseddefault()
4123 4120 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
4124 4121 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
4125 4122 for r in revs:
4126 4123 displayer.show(repo[r], **pycompat.strkwargs(props))
4127 4124 displayer.close()
4128 4125
4129 4126
4130 4127 @command(
4131 4128 b'debuguigetpass',
4132 4129 [
4133 4130 (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
4134 4131 ],
4135 4132 _(b'[-p TEXT]'),
4136 4133 norepo=True,
4137 4134 )
4138 4135 def debuguigetpass(ui, prompt=b''):
4139 4136 """show prompt to type password"""
4140 4137 r = ui.getpass(prompt)
4141 4138 if r is None:
4142 4139 r = b"<default response>"
4143 4140 ui.writenoi18n(b'response: %s\n' % r)
4144 4141
4145 4142
4146 4143 @command(
4147 4144 b'debuguiprompt',
4148 4145 [
4149 4146 (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
4150 4147 ],
4151 4148 _(b'[-p TEXT]'),
4152 4149 norepo=True,
4153 4150 )
4154 4151 def debuguiprompt(ui, prompt=b''):
4155 4152 """show plain prompt"""
4156 4153 r = ui.prompt(prompt)
4157 4154 ui.writenoi18n(b'response: %s\n' % r)
4158 4155
4159 4156
4160 4157 @command(b'debugupdatecaches', [])
4161 4158 def debugupdatecaches(ui, repo, *pats, **opts):
4162 4159 """warm all known caches in the repository"""
4163 4160 with repo.wlock(), repo.lock():
4164 4161 repo.updatecaches(caches=repository.CACHES_ALL)
4165 4162
4166 4163
4167 4164 @command(
4168 4165 b'debugupgraderepo',
4169 4166 [
4170 4167 (
4171 4168 b'o',
4172 4169 b'optimize',
4173 4170 [],
4174 4171 _(b'extra optimization to perform'),
4175 4172 _(b'NAME'),
4176 4173 ),
4177 4174 (b'', b'run', False, _(b'performs an upgrade')),
4178 4175 (b'', b'backup', True, _(b'keep the old repository content around')),
4179 4176 (b'', b'changelog', None, _(b'select the changelog for upgrade')),
4180 4177 (b'', b'manifest', None, _(b'select the manifest for upgrade')),
4181 4178 (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
4182 4179 ],
4183 4180 )
4184 4181 def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
4185 4182 """upgrade a repository to use different features
4186 4183
4187 4184 If no arguments are specified, the repository is evaluated for upgrade
4188 4185 and a list of problems and potential optimizations is printed.
4189 4186
4190 4187 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
4191 4188 can be influenced via additional arguments. More details will be provided
4192 4189 by the command output when run without ``--run``.
4193 4190
4194 4191 During the upgrade, the repository will be locked and no writes will be
4195 4192 allowed.
4196 4193
4197 4194 At the end of the upgrade, the repository may not be readable while new
4198 4195 repository data is swapped in. This window will be as long as it takes to
4199 4196 rename some directories inside the ``.hg`` directory. On most machines, this
4200 4197 should complete almost instantaneously and the chances of a consumer being
4201 4198 unable to access the repository should be low.
4202 4199
4203 4200 By default, all revlogs will be upgraded. You can restrict this using flags
4204 4201 such as `--manifest`:
4205 4202
4206 4203 * `--manifest`: only optimize the manifest
4207 4204 * `--no-manifest`: optimize all revlog but the manifest
4208 4205 * `--changelog`: optimize the changelog only
4209 4206 * `--no-changelog --no-manifest`: optimize filelogs only
4210 4207 * `--filelogs`: optimize the filelogs only
4211 4208 * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
4212 4209 """
4213 4210 return upgrade.upgraderepo(
4214 4211 ui, repo, run=run, optimize=set(optimize), backup=backup, **opts
4215 4212 )
4216 4213
4217 4214
4218 4215 @command(
4219 4216 b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
4220 4217 )
4221 4218 def debugwalk(ui, repo, *pats, **opts):
4222 4219 """show how files match on given patterns"""
4223 4220 opts = pycompat.byteskwargs(opts)
4224 4221 m = scmutil.match(repo[None], pats, opts)
4225 4222 if ui.verbose:
4226 4223 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
4227 4224 items = list(repo[None].walk(m))
4228 4225 if not items:
4229 4226 return
4230 4227 f = lambda fn: fn
4231 4228 if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
4232 4229 f = lambda fn: util.normpath(fn)
4233 4230 fmt = b'f %%-%ds %%-%ds %%s' % (
4234 4231 max([len(abs) for abs in items]),
4235 4232 max([len(repo.pathto(abs)) for abs in items]),
4236 4233 )
4237 4234 for abs in items:
4238 4235 line = fmt % (
4239 4236 abs,
4240 4237 f(repo.pathto(abs)),
4241 4238 m.exact(abs) and b'exact' or b'',
4242 4239 )
4243 4240 ui.write(b"%s\n" % line.rstrip())
4244 4241
4245 4242
4246 4243 @command(b'debugwhyunstable', [], _(b'REV'))
4247 4244 def debugwhyunstable(ui, repo, rev):
4248 4245 """explain instabilities of a changeset"""
4249 4246 for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
4250 4247 dnodes = b''
4251 4248 if entry.get(b'divergentnodes'):
4252 4249 dnodes = (
4253 4250 b' '.join(
4254 4251 b'%s (%s)' % (ctx.hex(), ctx.phasestr())
4255 4252 for ctx in entry[b'divergentnodes']
4256 4253 )
4257 4254 + b' '
4258 4255 )
4259 4256 ui.write(
4260 4257 b'%s: %s%s %s\n'
4261 4258 % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
4262 4259 )
4263 4260
4264 4261
4265 4262 @command(
4266 4263 b'debugwireargs',
4267 4264 [
4268 4265 (b'', b'three', b'', b'three'),
4269 4266 (b'', b'four', b'', b'four'),
4270 4267 (b'', b'five', b'', b'five'),
4271 4268 ]
4272 4269 + cmdutil.remoteopts,
4273 4270 _(b'REPO [OPTIONS]... [ONE [TWO]]'),
4274 4271 norepo=True,
4275 4272 )
4276 4273 def debugwireargs(ui, repopath, *vals, **opts):
4277 4274 opts = pycompat.byteskwargs(opts)
4278 4275 repo = hg.peer(ui, opts, repopath)
4279 4276 try:
4280 4277 for opt in cmdutil.remoteopts:
4281 4278 del opts[opt[1]]
4282 4279 args = {}
4283 4280 for k, v in pycompat.iteritems(opts):
4284 4281 if v:
4285 4282 args[k] = v
4286 4283 args = pycompat.strkwargs(args)
4287 4284 # run twice to check that we don't mess up the stream for the next command
4288 4285 res1 = repo.debugwireargs(*vals, **args)
4289 4286 res2 = repo.debugwireargs(*vals, **args)
4290 4287 ui.write(b"%s\n" % res1)
4291 4288 if res1 != res2:
4292 4289 ui.warn(b"%s\n" % res2)
4293 4290 finally:
4294 4291 repo.close()
4295 4292
4296 4293
4297 4294 def _parsewirelangblocks(fh):
4298 4295 activeaction = None
4299 4296 blocklines = []
4300 4297 lastindent = 0
4301 4298
4302 4299 for line in fh:
4303 4300 line = line.rstrip()
4304 4301 if not line:
4305 4302 continue
4306 4303
4307 4304 if line.startswith(b'#'):
4308 4305 continue
4309 4306
4310 4307 if not line.startswith(b' '):
4311 4308 # New block. Flush previous one.
4312 4309 if activeaction:
4313 4310 yield activeaction, blocklines
4314 4311
4315 4312 activeaction = line
4316 4313 blocklines = []
4317 4314 lastindent = 0
4318 4315 continue
4319 4316
4320 4317 # Else we start with an indent.
4321 4318
4322 4319 if not activeaction:
4323 4320 raise error.Abort(_(b'indented line outside of block'))
4324 4321
4325 4322 indent = len(line) - len(line.lstrip())
4326 4323
4327 4324 # If this line is indented more than the last line, concatenate it.
4328 4325 if indent > lastindent and blocklines:
4329 4326 blocklines[-1] += line.lstrip()
4330 4327 else:
4331 4328 blocklines.append(line)
4332 4329 lastindent = indent
4333 4330
4334 4331 # Flush last block.
4335 4332 if activeaction:
4336 4333 yield activeaction, blocklines
4337 4334
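# Illustrative sketch (not part of the original source): feeding the parser a
# small script shows how actions and their indented argument lines are
# grouped into blocks.
#
#     import io
#     script = io.BytesIO(b'command listkeys\n    namespace bookmarks\nflush\n')
#     list(_parsewirelangblocks(script))
#     # -> [(b'command listkeys', [b'    namespace bookmarks']), (b'flush', [])]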
4338 4335
4339 4336 @command(
4340 4337 b'debugwireproto',
4341 4338 [
4342 4339 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
4343 4340 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
4344 4341 (
4345 4342 b'',
4346 4343 b'noreadstderr',
4347 4344 False,
4348 4345 _(b'do not read from stderr of the remote'),
4349 4346 ),
4350 4347 (
4351 4348 b'',
4352 4349 b'nologhandshake',
4353 4350 False,
4354 4351 _(b'do not log I/O related to the peer handshake'),
4355 4352 ),
4356 4353 ]
4357 4354 + cmdutil.remoteopts,
4358 4355 _(b'[PATH]'),
4359 4356 optionalrepo=True,
4360 4357 )
4361 4358 def debugwireproto(ui, repo, path=None, **opts):
4362 4359 """send wire protocol commands to a server
4363 4360
4364 4361 This command can be used to issue wire protocol commands to remote
4365 4362 peers and to debug the raw data being exchanged.
4366 4363
4367 4364 ``--localssh`` will start an SSH server against the current repository
4368 4365 and connect to that. By default, the connection will perform a handshake
4369 4366 and establish an appropriate peer instance.
4370 4367
4371 4368 ``--peer`` can be used to bypass the handshake protocol and construct a
4372 4369 peer instance using the specified class type. Valid values are ``raw``,
4373 4370 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
4374 4371 raw data payloads and don't support higher-level command actions.
4375 4372
4376 4373 ``--noreadstderr`` can be used to disable automatic reading from stderr
4377 4374 of the peer (for SSH connections only). Disabling automatic reading of
4378 4375 stderr is useful for making output more deterministic.
4379 4376
4380 4377 Commands are issued via a mini language which is specified via stdin.
4381 4378 The language consists of individual actions to perform. An action is
4382 4379 defined by a block. A block is defined as a line with no leading
4383 4380 space followed by 0 or more lines with leading space. Blocks are
4384 4381 effectively a high-level command with additional metadata.
4385 4382
4386 4383 Lines beginning with ``#`` are ignored.
4387 4384
4388 4385 The following sections denote available actions.
4389 4386
4390 4387 raw
4391 4388 ---
4392 4389
4393 4390 Send raw data to the server.
4394 4391
4395 4392 The block payload contains the raw data to send as one atomic send
4396 4393 operation. The data may not actually be delivered in a single system
4397 4394 call: it depends on the abilities of the transport being used.
4398 4395
4399 4396 Each line in the block is de-indented and concatenated. Then, that
4400 4397 value is evaluated as a Python b'' literal. This allows the use of
4401 4398 backslash escaping, etc.
4402 4399
4403 4400 raw+
4404 4401 ----
4405 4402
4406 4403 Behaves like ``raw`` except flushes output afterwards.
4407 4404
4408 4405 command <X>
4409 4406 -----------
4410 4407
4411 4408 Send a request to run a named command, whose name follows the ``command``
4412 4409 string.
4413 4410
4414 4411 Arguments to the command are defined as lines in this block. The format of
4415 4412 each line is ``<key> <value>``. e.g.::
4416 4413
4417 4414 command listkeys
4418 4415 namespace bookmarks
4419 4416
4420 4417 If the value begins with ``eval:``, it will be interpreted as a Python
4421 4418 literal expression. Otherwise values are interpreted as Python b'' literals.
4422 4419 This allows sending complex types and encoding special byte sequences via
4423 4420 backslash escaping.
4424 4421
4425 4422 The following arguments have special meaning:
4426 4423
4427 4424 ``PUSHFILE``
4428 4425 When defined, the *push* mechanism of the peer will be used instead
4429 4426 of the static request-response mechanism and the content of the
4430 4427 file specified in the value of this argument will be sent as the
4431 4428 command payload.
4432 4429
4433 4430 This can be used to submit a local bundle file to the remote.
4434 4431
4435 4432 batchbegin
4436 4433 ----------
4437 4434
4438 4435 Instruct the peer to begin a batched send.
4439 4436
4440 4437 All ``command`` blocks are queued for execution until the next
4441 4438 ``batchsubmit`` block.
4442 4439
4443 4440 batchsubmit
4444 4441 -----------
4445 4442
4446 4443 Submit previously queued ``command`` blocks as a batch request.
4447 4444
4448 4445 This action MUST be paired with a ``batchbegin`` action.
4449 4446
4450 4447 httprequest <method> <path>
4451 4448 ---------------------------
4452 4449
4453 4450 (HTTP peer only)
4454 4451
4455 4452 Send an HTTP request to the peer.
4456 4453
4457 4454 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
4458 4455
4459 4456 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
4460 4457 headers to add to the request. e.g. ``Accept: foo``.
4461 4458
4462 4459 The following arguments are special:
4463 4460
4464 4461 ``BODYFILE``
4465 4462 The content of the file defined as the value to this argument will be
4466 4463 transferred verbatim as the HTTP request body.
4467 4464
4468 4465 ``frame <type> <flags> <payload>``
4469 4466 Send a unified protocol frame as part of the request body.
4470 4467
4471 4468 All frames will be collected and sent as the body to the HTTP
4472 4469 request.
4473 4470
4474 4471 close
4475 4472 -----
4476 4473
4477 4474 Close the connection to the server.
4478 4475
4479 4476 flush
4480 4477 -----
4481 4478
4482 4479 Flush data written to the server.
4483 4480
4484 4481 readavailable
4485 4482 -------------
4486 4483
4487 4484 Close the write end of the connection and read all available data from
4488 4485 the server.
4489 4486
4490 4487 If the connection to the server encompasses multiple pipes, we poll both
4491 4488 pipes and read available data.
4492 4489
4493 4490 readline
4494 4491 --------
4495 4492
4496 4493 Read a line of output from the server. If there are multiple output
4497 4494 pipes, reads only the main pipe.
4498 4495
4499 4496 ereadline
4500 4497 ---------
4501 4498
4502 4499 Like ``readline``, but read from the stderr pipe, if available.
4503 4500
4504 4501 read <X>
4505 4502 --------
4506 4503
4507 4504 ``read()`` N bytes from the server's main output pipe.
4508 4505
4509 4506 eread <X>
4510 4507 ---------
4511 4508
4512 4509 ``read()`` N bytes from the server's stderr pipe, if available.
4513 4510
4514 4511 Specifying Unified Frame-Based Protocol Frames
4515 4512 ----------------------------------------------
4516 4513
4517 4514     It is possible to emit *Unified Frame-Based Protocol* frames by using
4518 4515     special syntax.
4519 4516
4520 4517 A frame is composed as a type, flags, and payload. These can be parsed
4521 4518 from a string of the form:
4522 4519
4523 4520 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
4524 4521
4525 4522 ``request-id`` and ``stream-id`` are integers defining the request and
4526 4523 stream identifiers.
4527 4524
4528 4525 ``type`` can be an integer value for the frame type or the string name
4529 4526 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
4530 4527 ``command-name``.
4531 4528
4532 4529 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
4533 4530 components. Each component (and there can be just one) can be an integer
4534 4531 or a flag name for stream flags or frame flags, respectively. Values are
4535 4532 resolved to integers and then bitwise OR'd together.
4536 4533
4537 4534 ``payload`` represents the raw frame payload. If it begins with
4538 4535 ``cbor:``, the following string is evaluated as Python code and the
4539 4536 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
4540 4537 as a Python byte string literal.
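
    For example, a frame requesting the ``heads`` command could be spelled
    (assuming the type and flag names defined in ``wireprotoframing.py``)::

      1 1 stream-begin command-request new cbor:{b'name': b'heads'}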
4541 4538 """
4542 4539 opts = pycompat.byteskwargs(opts)
4543 4540
4544 4541 if opts[b'localssh'] and not repo:
4545 4542 raise error.Abort(_(b'--localssh requires a repository'))
4546 4543
4547 4544 if opts[b'peer'] and opts[b'peer'] not in (
4548 4545 b'raw',
4549 4546 b'http2',
4550 4547 b'ssh1',
4551 4548 b'ssh2',
4552 4549 ):
4553 4550 raise error.Abort(
4554 4551 _(b'invalid value for --peer'),
4555 4552             hint=_(b'valid values are "raw", "http2", "ssh1", and "ssh2"'),
4556 4553 )
4557 4554
4558 4555 if path and opts[b'localssh']:
4559 4556 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4560 4557
4561 4558 if ui.interactive():
4562 4559 ui.write(_(b'(waiting for commands on stdin)\n'))
4563 4560
4564 4561 blocks = list(_parsewirelangblocks(ui.fin))
4565 4562
4566 4563 proc = None
4567 4564 stdin = None
4568 4565 stdout = None
4569 4566 stderr = None
4570 4567 opener = None
4571 4568
4572 4569 if opts[b'localssh']:
4573 4570 # We start the SSH server in its own process so there is process
4574 4571 # separation. This prevents a whole class of potential bugs around
4575 4572 # shared state from interfering with server operation.
4576 4573 args = procutil.hgcmd() + [
4577 4574 b'-R',
4578 4575 repo.root,
4579 4576 b'debugserve',
4580 4577 b'--sshstdio',
4581 4578 ]
4582 4579 proc = subprocess.Popen(
4583 4580 pycompat.rapply(procutil.tonativestr, args),
4584 4581 stdin=subprocess.PIPE,
4585 4582 stdout=subprocess.PIPE,
4586 4583 stderr=subprocess.PIPE,
4587 4584 bufsize=0,
4588 4585 )
4589 4586
4590 4587 stdin = proc.stdin
4591 4588 stdout = proc.stdout
4592 4589 stderr = proc.stderr
4593 4590
4594 4591 # We turn the pipes into observers so we can log I/O.
4595 4592 if ui.verbose or opts[b'peer'] == b'raw':
4596 4593 stdin = util.makeloggingfileobject(
4597 4594 ui, proc.stdin, b'i', logdata=True
4598 4595 )
4599 4596 stdout = util.makeloggingfileobject(
4600 4597 ui, proc.stdout, b'o', logdata=True
4601 4598 )
4602 4599 stderr = util.makeloggingfileobject(
4603 4600 ui, proc.stderr, b'e', logdata=True
4604 4601 )
4605 4602
4606 4603 # --localssh also implies the peer connection settings.
4607 4604
4608 4605 url = b'ssh://localserver'
4609 4606 autoreadstderr = not opts[b'noreadstderr']
4610 4607
4611 4608 if opts[b'peer'] == b'ssh1':
4612 4609 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4613 4610 peer = sshpeer.sshv1peer(
4614 4611 ui,
4615 4612 url,
4616 4613 proc,
4617 4614 stdin,
4618 4615 stdout,
4619 4616 stderr,
4620 4617 None,
4621 4618 autoreadstderr=autoreadstderr,
4622 4619 )
4623 4620 elif opts[b'peer'] == b'ssh2':
4624 4621 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
4625 4622 peer = sshpeer.sshv2peer(
4626 4623 ui,
4627 4624 url,
4628 4625 proc,
4629 4626 stdin,
4630 4627 stdout,
4631 4628 stderr,
4632 4629 None,
4633 4630 autoreadstderr=autoreadstderr,
4634 4631 )
4635 4632 elif opts[b'peer'] == b'raw':
4636 4633 ui.write(_(b'using raw connection to peer\n'))
4637 4634 peer = None
4638 4635 else:
4639 4636 ui.write(_(b'creating ssh peer from handshake results\n'))
4640 4637 peer = sshpeer.makepeer(
4641 4638 ui,
4642 4639 url,
4643 4640 proc,
4644 4641 stdin,
4645 4642 stdout,
4646 4643 stderr,
4647 4644 autoreadstderr=autoreadstderr,
4648 4645 )
4649 4646
4650 4647 elif path:
4651 4648 # We bypass hg.peer() so we can proxy the sockets.
4652 4649 # TODO consider not doing this because we skip
4653 4650 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4654 4651 u = urlutil.url(path)
4655 4652 if u.scheme != b'http':
4656 4653 raise error.Abort(_(b'only http:// paths are currently supported'))
4657 4654
4658 4655 url, authinfo = u.authinfo()
4659 4656 openerargs = {
4660 4657 'useragent': b'Mercurial debugwireproto',
4661 4658 }
4662 4659
4663 4660 # Turn pipes/sockets into observers so we can log I/O.
4664 4661 if ui.verbose:
4665 4662 openerargs.update(
4666 4663 {
4667 4664 'loggingfh': ui,
4668 4665 'loggingname': b's',
4669 4666 'loggingopts': {
4670 4667 'logdata': True,
4671 4668 'logdataapis': False,
4672 4669 },
4673 4670 }
4674 4671 )
4675 4672
4676 4673 if ui.debugflag:
4677 4674 openerargs['loggingopts']['logdataapis'] = True
4678 4675
4679 4676 # Don't send default headers when in raw mode. This allows us to
4680 4677 # bypass most of the behavior of our URL handling code so we can
4681 4678 # have near complete control over what's sent on the wire.
4682 4679 if opts[b'peer'] == b'raw':
4683 4680 openerargs['sendaccept'] = False
4684 4681
4685 4682 opener = urlmod.opener(ui, authinfo, **openerargs)
4686 4683
4687 4684 if opts[b'peer'] == b'http2':
4688 4685 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4689 4686 # We go through makepeer() because we need an API descriptor for
4690 4687 # the peer instance to be useful.
4691 4688 maybe_silent = (
4692 4689 ui.silent()
4693 4690 if opts[b'nologhandshake']
4694 4691 else util.nullcontextmanager()
4695 4692 )
4696 4693 with maybe_silent, ui.configoverride(
4697 4694 {(b'experimental', b'httppeer.advertise-v2'): True}
4698 4695 ):
4699 4696 peer = httppeer.makepeer(ui, path, opener=opener)
4700 4697
4701 4698 if not isinstance(peer, httppeer.httpv2peer):
4702 4699 raise error.Abort(
4703 4700 _(
4704 4701 b'could not instantiate HTTP peer for '
4705 4702 b'wire protocol version 2'
4706 4703 ),
4707 4704 hint=_(
4708 4705 b'the server may not have the feature '
4709 4706 b'enabled or is not allowing this '
4710 4707 b'client version'
4711 4708 ),
4712 4709 )
4713 4710
4714 4711 elif opts[b'peer'] == b'raw':
4715 4712 ui.write(_(b'using raw connection to peer\n'))
4716 4713 peer = None
4717 4714 elif opts[b'peer']:
4718 4715 raise error.Abort(
4719 4716 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4720 4717 )
4721 4718 else:
4722 4719 peer = httppeer.makepeer(ui, path, opener=opener)
4723 4720
4724 4721 # We /could/ populate stdin/stdout with sock.makefile()...
4725 4722 else:
4726 4723 raise error.Abort(_(b'unsupported connection configuration'))
4727 4724
4728 4725 batchedcommands = None
4729 4726
4730 4727 # Now perform actions based on the parsed wire language instructions.
4731 4728 for action, lines in blocks:
4732 4729 if action in (b'raw', b'raw+'):
4733 4730 if not stdin:
4734 4731 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4735 4732
4736 4733 # Concatenate the data together.
4737 4734 data = b''.join(l.lstrip() for l in lines)
4738 4735 data = stringutil.unescapestr(data)
4739 4736 stdin.write(data)
4740 4737
4741 4738 if action == b'raw+':
4742 4739 stdin.flush()
4743 4740 elif action == b'flush':
4744 4741 if not stdin:
4745 4742 raise error.Abort(_(b'cannot call flush on this peer'))
4746 4743 stdin.flush()
4747 4744 elif action.startswith(b'command'):
4748 4745 if not peer:
4749 4746 raise error.Abort(
4750 4747 _(
4751 4748 b'cannot send commands unless peer instance '
4752 4749 b'is available'
4753 4750 )
4754 4751 )
4755 4752
4756 4753 command = action.split(b' ', 1)[1]
4757 4754
4758 4755 args = {}
4759 4756 for line in lines:
4760 4757 # We need to allow empty values.
4761 4758 fields = line.lstrip().split(b' ', 1)
4762 4759 if len(fields) == 1:
4763 4760 key = fields[0]
4764 4761 value = b''
4765 4762 else:
4766 4763 key, value = fields
4767 4764
4768 4765 if value.startswith(b'eval:'):
4769 4766 value = stringutil.evalpythonliteral(value[5:])
4770 4767 else:
4771 4768 value = stringutil.unescapestr(value)
4772 4769
4773 4770 args[key] = value
4774 4771
4775 4772 if batchedcommands is not None:
4776 4773 batchedcommands.append((command, args))
4777 4774 continue
4778 4775
4779 4776 ui.status(_(b'sending %s command\n') % command)
4780 4777
4781 4778 if b'PUSHFILE' in args:
4782 4779 with open(args[b'PUSHFILE'], 'rb') as fh:
4783 4780 del args[b'PUSHFILE']
4784 4781 res, output = peer._callpush(
4785 4782 command, fh, **pycompat.strkwargs(args)
4786 4783 )
4787 4784 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4788 4785 ui.status(
4789 4786 _(b'remote output: %s\n') % stringutil.escapestr(output)
4790 4787 )
4791 4788 else:
4792 4789 with peer.commandexecutor() as e:
4793 4790 res = e.callcommand(command, args).result()
4794 4791
4795 4792 if isinstance(res, wireprotov2peer.commandresponse):
4796 4793 val = res.objects()
4797 4794 ui.status(
4798 4795 _(b'response: %s\n')
4799 4796 % stringutil.pprint(val, bprefix=True, indent=2)
4800 4797 )
4801 4798 else:
4802 4799 ui.status(
4803 4800 _(b'response: %s\n')
4804 4801 % stringutil.pprint(res, bprefix=True, indent=2)
4805 4802 )
4806 4803
4807 4804 elif action == b'batchbegin':
4808 4805 if batchedcommands is not None:
4809 4806 raise error.Abort(_(b'nested batchbegin not allowed'))
4810 4807
4811 4808 batchedcommands = []
4812 4809 elif action == b'batchsubmit':
4813 4810 # There is a batching API we could go through. But it would be
4814 4811 # difficult to normalize requests into function calls. It is easier
4815 4812 # to bypass this layer and normalize to commands + args.
4816 4813 ui.status(
4817 4814 _(b'sending batch with %d sub-commands\n')
4818 4815 % len(batchedcommands)
4819 4816 )
4820 4817 assert peer is not None
4821 4818 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4822 4819 ui.status(
4823 4820 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4824 4821 )
4825 4822
4826 4823 batchedcommands = None
4827 4824
4828 4825 elif action.startswith(b'httprequest '):
4829 4826 if not opener:
4830 4827 raise error.Abort(
4831 4828 _(b'cannot use httprequest without an HTTP peer')
4832 4829 )
4833 4830
4834 4831 request = action.split(b' ', 2)
4835 4832 if len(request) != 3:
4836 4833 raise error.Abort(
4837 4834 _(
4838 4835 b'invalid httprequest: expected format is '
4839 4836                         b'"httprequest <method> <path>"'
4840 4837 )
4841 4838 )
4842 4839
4843 4840 method, httppath = request[1:]
4844 4841 headers = {}
4845 4842 body = None
4846 4843 frames = []
4847 4844 for line in lines:
4848 4845 line = line.lstrip()
4849 4846 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4850 4847 if m:
4851 4848 # Headers need to use native strings.
4852 4849 key = pycompat.strurl(m.group(1))
4853 4850 value = pycompat.strurl(m.group(2))
4854 4851 headers[key] = value
4855 4852 continue
4856 4853
4857 4854 if line.startswith(b'BODYFILE '):
4858 4855                 with open(line.split(b' ', 1)[1], b'rb') as fh:
4859 4856 body = fh.read()
4860 4857 elif line.startswith(b'frame '):
4861 4858 frame = wireprotoframing.makeframefromhumanstring(
4862 4859 line[len(b'frame ') :]
4863 4860 )
4864 4861
4865 4862 frames.append(frame)
4866 4863 else:
4867 4864 raise error.Abort(
4868 4865 _(b'unknown argument to httprequest: %s') % line
4869 4866 )
4870 4867
4871 4868 url = path + httppath
4872 4869
4873 4870 if frames:
4874 4871 body = b''.join(bytes(f) for f in frames)
4875 4872
4876 4873 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4877 4874
4878 4875 # urllib.Request insists on using has_data() as a proxy for
4879 4876 # determining the request method. Override that to use our
4880 4877 # explicitly requested method.
4881 4878 req.get_method = lambda: pycompat.sysstr(method)
4882 4879
4883 4880 try:
4884 4881 res = opener.open(req)
4885 4882 body = res.read()
4886 4883 except util.urlerr.urlerror as e:
4887 4884 # read() method must be called, but only exists in Python 2
4888 4885 getattr(e, 'read', lambda: None)()
4889 4886 continue
4890 4887
4891 4888 ct = res.headers.get('Content-Type')
4892 4889 if ct == 'application/mercurial-cbor':
4893 4890 ui.write(
4894 4891 _(b'cbor> %s\n')
4895 4892 % stringutil.pprint(
4896 4893 cborutil.decodeall(body), bprefix=True, indent=2
4897 4894 )
4898 4895 )
4899 4896
4900 4897 elif action == b'close':
4901 4898 assert peer is not None
4902 4899 peer.close()
4903 4900 elif action == b'readavailable':
4904 4901 if not stdout or not stderr:
4905 4902 raise error.Abort(
4906 4903 _(b'readavailable not available on this peer')
4907 4904 )
4908 4905
4909 4906 stdin.close()
4910 4907 stdout.read()
4911 4908 stderr.read()
4912 4909
4913 4910 elif action == b'readline':
4914 4911 if not stdout:
4915 4912 raise error.Abort(_(b'readline not available on this peer'))
4916 4913 stdout.readline()
4917 4914 elif action == b'ereadline':
4918 4915 if not stderr:
4919 4916 raise error.Abort(_(b'ereadline not available on this peer'))
4920 4917 stderr.readline()
4921 4918 elif action.startswith(b'read '):
4922 4919 count = int(action.split(b' ', 1)[1])
4923 4920 if not stdout:
4924 4921 raise error.Abort(_(b'read not available on this peer'))
4925 4922 stdout.read(count)
4926 4923 elif action.startswith(b'eread '):
4927 4924 count = int(action.split(b' ', 1)[1])
4928 4925 if not stderr:
4929 4926 raise error.Abort(_(b'eread not available on this peer'))
4930 4927 stderr.read(count)
4931 4928 else:
4932 4929 raise error.Abort(_(b'unknown action: %s') % action)
4933 4930
4934 4931 if batchedcommands is not None:
4935 4932 raise error.Abort(_(b'unclosed "batchbegin" request'))
4936 4933
4937 4934 if peer:
4938 4935 peer.close()
4939 4936
4940 4937 if proc:
4941 4938 proc.kill()
@@ -1,952 +1,962 b''
1 1 # dirstatemap.py
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 from __future__ import absolute_import
7 7
8 8 import errno
9 9
10 10 from .i18n import _
11 11
12 12 from . import (
13 13 error,
14 14 pathutil,
15 15 policy,
16 16 pycompat,
17 17 txnutil,
18 18 util,
19 19 )
20 20
21 21 from .dirstateutils import (
22 22 docket as docketmod,
23 23 )
24 24
25 25 parsers = policy.importmod('parsers')
26 26 rustmod = policy.importrust('dirstate')
27 27
28 28 propertycache = util.propertycache
29 29
30 30 DirstateItem = parsers.DirstateItem
31 31
32 32 rangemask = 0x7FFFFFFF
33 33
34 34
35 35 class dirstatemap(object):
36 36 """Map encapsulating the dirstate's contents.
37 37
38 38 The dirstate contains the following state:
39 39
40 40 - `identity` is the identity of the dirstate file, which can be used to
41 41 detect when changes have occurred to the dirstate file.
42 42
43 43 - `parents` is a pair containing the parents of the working copy. The
44 44 parents are updated by calling `setparents`.
45 45
46 46 - the state map maps filenames to tuples of (state, mode, size, mtime),
47 47 where state is a single character representing 'normal', 'added',
48 48 'removed', or 'merged'. It is read by treating the dirstate as a
49 49 dict. File state is updated by calling various methods (see each
50 50 documentation for details):
51 51
52 52 - `reset_state`,
53 53 - `set_tracked`
54 54 - `set_untracked`
55 55 - `set_clean`
56 56 - `set_possibly_dirty`
57 57
58 58 - `copymap` maps destination filenames to their source filename.
59 59
60 60 The dirstate also provides the following views onto the state:
61 61
62 62 - `nonnormalset` is a set of the filenames that have state other
63 63 than 'normal', or are normal but have an mtime of -1 ('normallookup').
64 64
65 65 - `otherparentset` is a set of the filenames that are marked as coming
66 66 from the second parent when the dirstate is currently being merged.
67 67
68 68 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
69 69 form that they appear as in the dirstate.
70 70
71 71 - `dirfoldmap` is a dict mapping normalized directory names to the
72 72 denormalized form that they appear as in the dirstate.
73 73 """
74 74
75 75 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
76 76 self._ui = ui
77 77 self._opener = opener
78 78 self._root = root
79 79 self._filename = b'dirstate'
80 80 self._nodelen = 20
81 81 self._nodeconstants = nodeconstants
82 82 assert (
83 83 not use_dirstate_v2
84 84 ), "should have detected unsupported requirement"
85 85
86 86 self._parents = None
87 87 self._dirtyparents = False
88 88
89 89 # for consistent view between _pl() and _read() invocations
90 90 self._pendingmode = None
91 91
92 92 @propertycache
93 93 def _map(self):
94 94 self._map = {}
95 95 self.read()
96 96 return self._map
97 97
98 98 @propertycache
99 99 def copymap(self):
100 100 self.copymap = {}
101 101 self._map
102 102 return self.copymap
103 103
104 104 def clear(self):
105 105 self._map.clear()
106 106 self.copymap.clear()
107 107 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
108 108 util.clearcachedproperty(self, b"_dirs")
109 109 util.clearcachedproperty(self, b"_alldirs")
110 110 util.clearcachedproperty(self, b"filefoldmap")
111 111 util.clearcachedproperty(self, b"dirfoldmap")
112 112 util.clearcachedproperty(self, b"nonnormalset")
113 113 util.clearcachedproperty(self, b"otherparentset")
114 114
115 115 def items(self):
116 116 return pycompat.iteritems(self._map)
117 117
118 118 # forward for python2,3 compat
119 119 iteritems = items
120 120
121 121 def debug_iter(self, all):
122 122 """
123 Return an iterator of (filename, state, mode, size, mtime) tuples
124
123 125 `all` is unused when Rust is not enabled
124 126 """
125 return self.item()
127 for (filename, item) in self.items():
128 yield (filename, item.state, item.mode, item.size, item.mtime)
126 129
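
With this change both implementations agree on yielding plain tuples, which is what debug tooling formats. A hypothetical consumer, with `dmap` standing in for a loaded `dirstatemap` instance:

    # each item is now a (filename, state, mode, size, mtime) tuple,
    # not a DirstateItem
    entries = [
        (filename, state)
        for filename, state, mode, size, mtime in dmap.debug_iter(all=False)
    ]
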
127 130 def __len__(self):
128 131 return len(self._map)
129 132
130 133 def __iter__(self):
131 134 return iter(self._map)
132 135
133 136 def get(self, key, default=None):
134 137 return self._map.get(key, default)
135 138
136 139 def __contains__(self, key):
137 140 return key in self._map
138 141
139 142 def __getitem__(self, key):
140 143 return self._map[key]
141 144
142 145 def keys(self):
143 146 return self._map.keys()
144 147
145 148 def preload(self):
146 149 """Loads the underlying data, if it's not already loaded"""
147 150 self._map
148 151
149 152 def _dirs_incr(self, filename, old_entry=None):
150 153 """incremente the dirstate counter if applicable"""
151 154 if (
152 155 old_entry is None or old_entry.removed
153 156 ) and "_dirs" in self.__dict__:
154 157 self._dirs.addpath(filename)
155 158 if old_entry is None and "_alldirs" in self.__dict__:
156 159 self._alldirs.addpath(filename)
157 160
158 161 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
159 162 """decremente the dirstate counter if applicable"""
160 163 if old_entry is not None:
161 164 if "_dirs" in self.__dict__ and not old_entry.removed:
162 165 self._dirs.delpath(filename)
163 166 if "_alldirs" in self.__dict__ and not remove_variant:
164 167 self._alldirs.delpath(filename)
165 168 elif remove_variant and "_alldirs" in self.__dict__:
166 169 self._alldirs.addpath(filename)
167 170 if "filefoldmap" in self.__dict__:
168 171 normed = util.normcase(filename)
169 172 self.filefoldmap.pop(normed, None)
170 173
171 174 def set_possibly_dirty(self, filename):
172 175 """record that the current state of the file on disk is unknown"""
173 176 self[filename].set_possibly_dirty()
174 177
175 178 def set_clean(self, filename, mode, size, mtime):
176 179 """mark a file as back to a clean state"""
177 180 entry = self[filename]
178 181 mtime = mtime & rangemask
179 182 size = size & rangemask
180 183 entry.set_clean(mode, size, mtime)
181 184 self.copymap.pop(filename, None)
182 185 self.nonnormalset.discard(filename)
183 186
184 187 def reset_state(
185 188 self,
186 189 filename,
187 190 wc_tracked=False,
188 191 p1_tracked=False,
189 192 p2_tracked=False,
190 193 merged=False,
191 194 clean_p1=False,
192 195 clean_p2=False,
193 196 possibly_dirty=False,
194 197 parentfiledata=None,
195 198 ):
196 199 """Set a entry to a given state, diregarding all previous state
197 200
198 201 This is to be used by the part of the dirstate API dedicated to
199 202 adjusting the dirstate after a update/merge.
200 203
201 204 note: calling this might result to no entry existing at all if the
202 205 dirstate map does not see any point at having one for this file
203 206 anymore.
204 207 """
205 208 if merged and (clean_p1 or clean_p2):
206 209 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
207 210 raise error.ProgrammingError(msg)
208 211 # copy information is now outdated
209 212 # (maybe new information should be passed directly to this function)
210 213 self.copymap.pop(filename, None)
211 214
212 215 if not (p1_tracked or p2_tracked or wc_tracked):
213 216 old_entry = self._map.pop(filename, None)
214 217 self._dirs_decr(filename, old_entry=old_entry)
215 218 self.nonnormalset.discard(filename)
216 219 self.copymap.pop(filename, None)
217 220 return
218 221 elif merged:
219 222 # XXX might be merged and removed ?
220 223 entry = self.get(filename)
221 224 if entry is None or not entry.tracked:
222 225 # XXX mostly replicates dirstate.otherparent. We should get
223 226 # the higher layer to pass us more reliable data where `merged`
224 227 # actually means merged. Dropping this clause will show failures
225 228 # in `test-graft.t`
226 229 merged = False
227 230 clean_p2 = True
228 231 elif not (p1_tracked or p2_tracked) and wc_tracked:
229 232 pass # file is added, nothing special to adjust
230 233 elif (p1_tracked or p2_tracked) and not wc_tracked:
231 234 pass
232 235 elif clean_p2 and wc_tracked:
233 236 if p1_tracked or self.get(filename) is not None:
234 237 # XXX the `self.get` call is catching a case in
235 238 # `test-merge-remove.t` where the file is tracked in p1 but the
236 239 # p1_tracked argument is False.
237 240 #
238 241 # In addition, this seems to be a case where the file is marked
239 242 # as merged without actually being the result of a merge
240 243 # action. So things are not ideal here.
241 244 merged = True
242 245 clean_p2 = False
243 246 elif not p1_tracked and p2_tracked and wc_tracked:
244 247 clean_p2 = True
245 248 elif possibly_dirty:
246 249 pass
247 250 elif wc_tracked:
248 251 # this is a "normal" file
249 252 if parentfiledata is None:
250 253 msg = b'failed to pass parentfiledata for a normal file: %s'
251 254 msg %= filename
252 255 raise error.ProgrammingError(msg)
253 256 else:
254 257 assert False, 'unreachable'
255 258
256 259 old_entry = self._map.get(filename)
257 260 self._dirs_incr(filename, old_entry)
258 261 entry = DirstateItem(
259 262 wc_tracked=wc_tracked,
260 263 p1_tracked=p1_tracked,
261 264 p2_tracked=p2_tracked,
262 265 merged=merged,
263 266 clean_p1=clean_p1,
264 267 clean_p2=clean_p2,
265 268 possibly_dirty=possibly_dirty,
266 269 parentfiledata=parentfiledata,
267 270 )
268 271 if entry.dm_nonnormal:
269 272 self.nonnormalset.add(filename)
270 273 else:
271 274 self.nonnormalset.discard(filename)
272 275 if entry.dm_otherparent:
273 276 self.otherparentset.add(filename)
274 277 else:
275 278 self.otherparentset.discard(filename)
276 279 self._map[filename] = entry
277 280
278 281 def set_tracked(self, filename):
279 282 new = False
280 283 entry = self.get(filename)
281 284 if entry is None:
282 285 self._dirs_incr(filename)
283 286 entry = DirstateItem(
284 287 p1_tracked=False,
285 288 p2_tracked=False,
286 289 wc_tracked=True,
287 290 merged=False,
288 291 clean_p1=False,
289 292 clean_p2=False,
290 293 possibly_dirty=False,
291 294 parentfiledata=None,
292 295 )
293 296 self._map[filename] = entry
294 297 if entry.dm_nonnormal:
295 298 self.nonnormalset.add(filename)
296 299 new = True
297 300 elif not entry.tracked:
298 301 self._dirs_incr(filename, entry)
299 302 entry.set_tracked()
300 303 new = True
301 304 else:
302 305 # XXX This is probably overkill for most cases, but we need this to
303 306 # fully replace the `normallookup` call with the `set_tracked` one.
304 307 # Consider smoothing this in the future.
305 308 self.set_possibly_dirty(filename)
306 309 return new
307 310
308 311 def set_untracked(self, f):
309 312 """Mark a file as no longer tracked in the dirstate map"""
310 313 entry = self.get(f)
311 314 if entry is None:
312 315 return False
313 316 else:
314 317 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
315 318 if not entry.merged:
316 319 self.copymap.pop(f, None)
317 320 if entry.added:
318 321 self.nonnormalset.discard(f)
319 322 self._map.pop(f, None)
320 323 else:
321 324 self.nonnormalset.add(f)
322 325 if entry.from_p2:
323 326 self.otherparentset.add(f)
324 327 entry.set_untracked()
325 328 return True
326 329
327 330 def clearambiguoustimes(self, files, now):
328 331 for f in files:
329 332 e = self.get(f)
330 333 if e is not None and e.need_delay(now):
331 334 e.set_possibly_dirty()
332 335 self.nonnormalset.add(f)
333 336
334 337 def nonnormalentries(self):
335 338 '''Compute the nonnormal dirstate entries from the dmap'''
336 339 try:
337 340 return parsers.nonnormalotherparententries(self._map)
338 341 except AttributeError:
339 342 nonnorm = set()
340 343 otherparent = set()
341 344 for fname, e in pycompat.iteritems(self._map):
342 345 if e.dm_nonnormal:
343 346 nonnorm.add(fname)
344 347 if e.from_p2:
345 348 otherparent.add(fname)
346 349 return nonnorm, otherparent
347 350
348 351 @propertycache
349 352 def filefoldmap(self):
350 353 """Returns a dictionary mapping normalized case paths to their
351 354 non-normalized versions.
352 355 """
353 356 try:
354 357 makefilefoldmap = parsers.make_file_foldmap
355 358 except AttributeError:
356 359 pass
357 360 else:
358 361 return makefilefoldmap(
359 362 self._map, util.normcasespec, util.normcasefallback
360 363 )
361 364
362 365 f = {}
363 366 normcase = util.normcase
364 367 for name, s in pycompat.iteritems(self._map):
365 368 if not s.removed:
366 369 f[normcase(name)] = name
367 370 f[b'.'] = b'.' # prevents useless util.fspath() invocation
368 371 return f
369 372
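
The fold map exists so that lookups on case-insensitive filesystems can recover the spelling actually recorded in the dirstate. A toy version of the pure-Python fallback above, with `.lower()` standing in for `util.normcase` on a case-folding platform and made-up entries:

    def normcase(path):
        # stand-in for util.normcase on a case-insensitive platform
        return path.lower()

    stored = [b'README.txt', b'Makefile']
    filefoldmap = {normcase(name): name for name in stored}
    # a differently-cased lookup recovers the stored spelling
    assert filefoldmap[normcase(b'readme.TXT')] == b'README.txt'
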
370 373 def hastrackeddir(self, d):
371 374 """
372 375 Returns True if the dirstate contains a tracked (not removed) file
373 376 in this directory.
374 377 """
375 378 return d in self._dirs
376 379
377 380 def hasdir(self, d):
378 381 """
379 382 Returns True if the dirstate contains a file (tracked or removed)
380 383 in this directory.
381 384 """
382 385 return d in self._alldirs
383 386
384 387 @propertycache
385 388 def _dirs(self):
386 389 return pathutil.dirs(self._map, only_tracked=True)
387 390
388 391 @propertycache
389 392 def _alldirs(self):
390 393 return pathutil.dirs(self._map)
391 394
392 395 def _opendirstatefile(self):
393 396 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
394 397 if self._pendingmode is not None and self._pendingmode != mode:
395 398 fp.close()
396 399 raise error.Abort(
397 400 _(b'working directory state may be changed parallelly')
398 401 )
399 402 self._pendingmode = mode
400 403 return fp
401 404
402 405 def parents(self):
403 406 if not self._parents:
404 407 try:
405 408 fp = self._opendirstatefile()
406 409 st = fp.read(2 * self._nodelen)
407 410 fp.close()
408 411 except IOError as err:
409 412 if err.errno != errno.ENOENT:
410 413 raise
411 414 # File doesn't exist, so the current state is empty
412 415 st = b''
413 416
414 417 l = len(st)
415 418 if l == self._nodelen * 2:
416 419 self._parents = (
417 420 st[: self._nodelen],
418 421 st[self._nodelen : 2 * self._nodelen],
419 422 )
420 423 elif l == 0:
421 424 self._parents = (
422 425 self._nodeconstants.nullid,
423 426 self._nodeconstants.nullid,
424 427 )
425 428 else:
426 429 raise error.Abort(
427 430 _(b'working directory state appears damaged!')
428 431 )
429 432
430 433 return self._parents
431 434
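
The parent-reading logic relies on the v1 dirstate layout: the file starts with the two parent nodes, 20 bytes each for SHA-1 hashes. A standalone sketch of the same read (the path is illustrative):

    nodelen = 20
    with open('.hg/dirstate', 'rb') as fp:
        header = fp.read(2 * nodelen)
    if header:
        p1, p2 = header[:nodelen], header[nodelen:]
    # an empty file means both parents are the null revision
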
432 435 def setparents(self, p1, p2):
433 436 self._parents = (p1, p2)
434 437 self._dirtyparents = True
435 438
436 439 def read(self):
437 440 # ignore HG_PENDING because identity is used only for writing
438 441 self.identity = util.filestat.frompath(
439 442 self._opener.join(self._filename)
440 443 )
441 444
442 445 try:
443 446 fp = self._opendirstatefile()
444 447 try:
445 448 st = fp.read()
446 449 finally:
447 450 fp.close()
448 451 except IOError as err:
449 452 if err.errno != errno.ENOENT:
450 453 raise
451 454 return
452 455 if not st:
453 456 return
454 457
455 458 if util.safehasattr(parsers, b'dict_new_presized'):
456 459 # Make an estimate of the number of files in the dirstate based on
457 460 # its size. This trades wasting some memory for avoiding costly
458 461 # resizes. Each entry has a prefix of 17 bytes followed by one or
459 462 # two path names. Studies on various large-scale real-world repositories
460 463 # found 54 bytes to be a reasonable upper limit for the average path name.
461 464 # Copy entries are ignored for the sake of this estimate.
462 465 self._map = parsers.dict_new_presized(len(st) // 71)
463 466
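
The divisor comes straight from the estimate in the comment: 17 bytes of fixed prefix plus an assumed 54-byte average path gives 71 bytes per entry. For example, with a made-up file size:

    # a ~7.1 MB dirstate file suggests roughly 100,000 entries
    estimated = 7100000 // 71  # == 100000
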
464 467 # Python's garbage collector triggers a GC each time a certain number
465 468 # of container objects (the number being defined by
466 469 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
467 470 # for each file in the dirstate. The C version then immediately marks
468 471 # them as not to be tracked by the collector. However, this has no
469 472 # effect on when GCs are triggered, only on what objects the GC looks
470 473 # into. This means that O(number of files) GCs are unavoidable.
471 474 # Depending on when in the process's lifetime the dirstate is parsed,
472 475 # this can get very expensive. As a workaround, disable GC while
473 476 # parsing the dirstate.
474 477 #
475 478 # (we cannot decorate the function directly since it is in a C module)
476 479 parse_dirstate = util.nogc(parsers.parse_dirstate)
477 480 p = parse_dirstate(self._map, self.copymap, st)
478 481 if not self._dirtyparents:
479 482 self.setparents(*p)
480 483
481 484 # Avoid excess attribute lookups by fast pathing certain checks
482 485 self.__contains__ = self._map.__contains__
483 486 self.__getitem__ = self._map.__getitem__
484 487 self.get = self._map.get
485 488
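
The GC workaround is worth spelling out, since it applies to any code that allocates a huge number of container objects in a tight loop. A minimal sketch of the idea behind `util.nogc` (not the actual implementation):

    import functools
    import gc

    def nogc(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # disable the cyclic collector for the duration of the call
            was_enabled = gc.isenabled()
            gc.disable()
            try:
                return func(*args, **kwargs)
            finally:
                if was_enabled:
                    gc.enable()
        return wrapper
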
486 489 def write(self, _tr, st, now):
487 490 st.write(
488 491 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
489 492 )
490 493 st.close()
491 494 self._dirtyparents = False
492 495 self.nonnormalset, self.otherparentset = self.nonnormalentries()
493 496
494 497 @propertycache
495 498 def nonnormalset(self):
496 499 nonnorm, otherparents = self.nonnormalentries()
497 500 self.otherparentset = otherparents
498 501 return nonnorm
499 502
500 503 @propertycache
501 504 def otherparentset(self):
502 505 nonnorm, otherparents = self.nonnormalentries()
503 506 self.nonnormalset = nonnorm
504 507 return otherparents
505 508
506 509 def non_normal_or_other_parent_paths(self):
507 510 return self.nonnormalset.union(self.otherparentset)
508 511
509 512 @propertycache
510 513 def identity(self):
511 514 self._map
512 515 return self.identity
513 516
514 517 @propertycache
515 518 def dirfoldmap(self):
516 519 f = {}
517 520 normcase = util.normcase
518 521 for name in self._dirs:
519 522 f[normcase(name)] = name
520 523 return f
521 524
522 525
523 526 if rustmod is not None:
524 527
525 528 class dirstatemap(object):
526 529 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
527 530 self._use_dirstate_v2 = use_dirstate_v2
528 531 self._nodeconstants = nodeconstants
529 532 self._ui = ui
530 533 self._opener = opener
531 534 self._root = root
532 535 self._filename = b'dirstate'
533 536 self._nodelen = 20 # Also update Rust code when changing this!
534 537 self._parents = None
535 538 self._dirtyparents = False
536 539 self._docket = None
537 540
538 541 # for consistent view between _pl() and _read() invocations
539 542 self._pendingmode = None
540 543
541 544 self._use_dirstate_tree = self._ui.configbool(
542 545 b"experimental",
543 546 b"dirstate-tree.in-memory",
544 547 False,
545 548 )
546 549
547 550 def addfile(
548 551 self,
549 552 f,
550 553 mode=0,
551 554 size=None,
552 555 mtime=None,
553 556 added=False,
554 557 merged=False,
555 558 from_p2=False,
556 559 possibly_dirty=False,
557 560 ):
558 561 ret = self._rustmap.addfile(
559 562 f,
560 563 mode,
561 564 size,
562 565 mtime,
563 566 added,
564 567 merged,
565 568 from_p2,
566 569 possibly_dirty,
567 570 )
568 571 if added:
569 572 self.copymap.pop(f, None)
570 573 return ret
571 574
572 575 def reset_state(
573 576 self,
574 577 filename,
575 578 wc_tracked=False,
576 579 p1_tracked=False,
577 580 p2_tracked=False,
578 581 merged=False,
579 582 clean_p1=False,
580 583 clean_p2=False,
581 584 possibly_dirty=False,
582 585 parentfiledata=None,
583 586 ):
584 587 """Set a entry to a given state, disregarding all previous state
585 588
586 589 This is to be used by the part of the dirstate API dedicated to
587 590 adjusting the dirstate after a update/merge.
588 591
589 592 note: calling this might result to no entry existing at all if the
590 593 dirstate map does not see any point at having one for this file
591 594 anymore.
592 595 """
593 596 if merged and (clean_p1 or clean_p2):
594 597 msg = (
595 598 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
596 599 )
597 600 raise error.ProgrammingError(msg)
598 601 # copy information is now outdated
599 602 # (maybe new information should be passed directly to this function)
600 603 self.copymap.pop(filename, None)
601 604
602 605 if not (p1_tracked or p2_tracked or wc_tracked):
603 606 self.dropfile(filename)
604 607 elif merged:
605 608 # XXX might be merged and removed ?
606 609 entry = self.get(filename)
607 610 if entry is not None and entry.tracked:
608 611 # XXX mostly replicates dirstate.otherparent. We should get
609 612 # the higher layer to pass us more reliable data where `merged`
610 613 # actually means merged. Dropping the else clause will show
611 614 # failures in `test-graft.t`
612 615 self.addfile(filename, merged=True)
613 616 else:
614 617 self.addfile(filename, from_p2=True)
615 618 elif not (p1_tracked or p2_tracked) and wc_tracked:
616 619 self.addfile(
617 620 filename, added=True, possibly_dirty=possibly_dirty
618 621 )
619 622 elif (p1_tracked or p2_tracked) and not wc_tracked:
620 623 # XXX might be merged and removed ?
621 624 self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0)
622 625 self.nonnormalset.add(filename)
623 626 elif clean_p2 and wc_tracked:
624 627 if p1_tracked or self.get(filename) is not None:
625 628 # XXX the `self.get` call is catching a case in
626 629 # `test-merge-remove.t` where the file is tracked in p1 but the
627 630 # p1_tracked argument is False.
628 631 #
629 632 # In addition, this seems to be a case where the file is marked
630 633 # as merged without actually being the result of a merge
631 634 # action. So things are not ideal here.
632 635 self.addfile(filename, merged=True)
633 636 else:
634 637 self.addfile(filename, from_p2=True)
635 638 elif not p1_tracked and p2_tracked and wc_tracked:
636 639 self.addfile(
637 640 filename, from_p2=True, possibly_dirty=possibly_dirty
638 641 )
639 642 elif possibly_dirty:
640 643 self.addfile(filename, possibly_dirty=possibly_dirty)
641 644 elif wc_tracked:
642 645 # this is a "normal" file
643 646 if parentfiledata is None:
644 647 msg = b'failed to pass parentfiledata for a normal file: %s'
645 648 msg %= filename
646 649 raise error.ProgrammingError(msg)
647 650 mode, size, mtime = parentfiledata
648 651 self.addfile(filename, mode=mode, size=size, mtime=mtime)
649 652 self.nonnormalset.discard(filename)
650 653 else:
651 654 assert False, 'unreachable'
652 655
653 656 def set_tracked(self, filename):
654 657 new = False
655 658 entry = self.get(filename)
656 659 if entry is None:
657 660 self.addfile(filename, added=True)
658 661 new = True
659 662 elif not entry.tracked:
660 663 entry.set_tracked()
661 664 self._rustmap.set_v1(filename, entry)
662 665 new = True
663 666 else:
664 667 # XXX This is probably overkill for most cases, but we need this to
665 668 # fully replace the `normallookup` call with the `set_tracked` one.
666 669 # Consider smoothing this in the future.
667 670 self.set_possibly_dirty(filename)
668 671 return new
669 672
670 673 def set_untracked(self, f):
671 674 """Mark a file as no longer tracked in the dirstate map"""
672 675 # passing in_merge only triggers more logic, so it is "fine" to pass it.
673 676 #
674 677 # the inner rust dirstate map code needs to be adjusted once the API
675 678 # for dirstate/dirstatemap/DirstateItem is a bit more settled
676 679 entry = self.get(f)
677 680 if entry is None:
678 681 return False
679 682 else:
680 683 if entry.added:
681 684 self._rustmap.copymap().pop(f, None)
682 685 self._rustmap.dropfile(f)
683 686 else:
684 687 self._rustmap.removefile(f, in_merge=True)
685 688 return True
686 689
687 690 def removefile(self, *args, **kwargs):
688 691 return self._rustmap.removefile(*args, **kwargs)
689 692
690 693 def dropfile(self, f, *args, **kwargs):
691 694 self._rustmap.copymap().pop(f, None)
692 695 return self._rustmap.dropfile(f, *args, **kwargs)
693 696
694 697 def clearambiguoustimes(self, *args, **kwargs):
695 698 return self._rustmap.clearambiguoustimes(*args, **kwargs)
696 699
697 700 def nonnormalentries(self):
698 701 return self._rustmap.nonnormalentries()
699 702
700 703 def get(self, *args, **kwargs):
701 704 return self._rustmap.get(*args, **kwargs)
702 705
703 706 @property
704 707 def copymap(self):
705 708 return self._rustmap.copymap()
706 709
707 710 def debug_iter(self, all):
711 """
712 Return an iterator of (filename, state, mode, size, mtime) tuples
713
714 `all`: also include dirstate tree nodes that don't have an
715 associated `DirstateItem`, reported with `state == b' '`.
716
717 """
708 718 return self._rustmap.debug_iter(all)
709 719
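
A hypothetical use of the `all` flag with the Rust map, `dmap` again standing in for a loaded instance; per the docstring, nodes without an associated `DirstateItem` come back with a blank state byte:

    for filename, state, mode, size, mtime in dmap.debug_iter(all=True):
        if state == b' ':
            # tree node without a DirstateItem of its own
            pass
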
710 720 def preload(self):
711 721 self._rustmap
712 722
713 723 def clear(self):
714 724 self._rustmap.clear()
715 725 self.setparents(
716 726 self._nodeconstants.nullid, self._nodeconstants.nullid
717 727 )
718 728 util.clearcachedproperty(self, b"_dirs")
719 729 util.clearcachedproperty(self, b"_alldirs")
720 730 util.clearcachedproperty(self, b"dirfoldmap")
721 731
722 732 def items(self):
723 733 return self._rustmap.items()
724 734
725 735 def keys(self):
726 736 return iter(self._rustmap)
727 737
728 738 def __contains__(self, key):
729 739 return key in self._rustmap
730 740
731 741 def __getitem__(self, item):
732 742 return self._rustmap[item]
733 743
734 744 def __len__(self):
735 745 return len(self._rustmap)
736 746
737 747 def __iter__(self):
738 748 return iter(self._rustmap)
739 749
740 750 # forward for python2,3 compat
741 751 iteritems = items
742 752
743 753 def _opendirstatefile(self):
744 754 fp, mode = txnutil.trypending(
745 755 self._root, self._opener, self._filename
746 756 )
747 757 if self._pendingmode is not None and self._pendingmode != mode:
748 758 fp.close()
749 759 raise error.Abort(
750 760 _(b'working directory state may be changed parallelly')
751 761 )
752 762 self._pendingmode = mode
753 763 return fp
754 764
755 765 def _readdirstatefile(self, size=-1):
756 766 try:
757 767 with self._opendirstatefile() as fp:
758 768 return fp.read(size)
759 769 except IOError as err:
760 770 if err.errno != errno.ENOENT:
761 771 raise
762 772 # File doesn't exist, so the current state is empty
763 773 return b''
764 774
765 775 def setparents(self, p1, p2):
766 776 self._parents = (p1, p2)
767 777 self._dirtyparents = True
768 778
769 779 def parents(self):
770 780 if not self._parents:
771 781 if self._use_dirstate_v2:
772 782 self._parents = self.docket.parents
773 783 else:
774 784 read_len = self._nodelen * 2
775 785 st = self._readdirstatefile(read_len)
776 786 l = len(st)
777 787 if l == read_len:
778 788 self._parents = (
779 789 st[: self._nodelen],
780 790 st[self._nodelen : 2 * self._nodelen],
781 791 )
782 792 elif l == 0:
783 793 self._parents = (
784 794 self._nodeconstants.nullid,
785 795 self._nodeconstants.nullid,
786 796 )
787 797 else:
788 798 raise error.Abort(
789 799 _(b'working directory state appears damaged!')
790 800 )
791 801
792 802 return self._parents
793 803
794 804 @property
795 805 def docket(self):
796 806 if not self._docket:
797 807 if not self._use_dirstate_v2:
798 808 raise error.ProgrammingError(
799 809 b'dirstate only has a docket in v2 format'
800 810 )
801 811 self._docket = docketmod.DirstateDocket.parse(
802 812 self._readdirstatefile(), self._nodeconstants
803 813 )
804 814 return self._docket
805 815
806 816 @propertycache
807 817 def _rustmap(self):
808 818 """
809 819 Fills the DirstateMap when called.
810 820 """
811 821 # ignore HG_PENDING because identity is used only for writing
812 822 self.identity = util.filestat.frompath(
813 823 self._opener.join(self._filename)
814 824 )
815 825
816 826 if self._use_dirstate_v2:
817 827 if self.docket.uuid:
818 828 # TODO: use mmap when possible
819 829 data = self._opener.read(self.docket.data_filename())
820 830 else:
821 831 data = b''
822 832 self._rustmap = rustmod.DirstateMap.new_v2(
823 833 data, self.docket.data_size, self.docket.tree_metadata
824 834 )
825 835 parents = self.docket.parents
826 836 else:
827 837 self._rustmap, parents = rustmod.DirstateMap.new_v1(
828 838 self._use_dirstate_tree, self._readdirstatefile()
829 839 )
830 840
831 841 if parents and not self._dirtyparents:
832 842 self.setparents(*parents)
833 843
834 844 self.__contains__ = self._rustmap.__contains__
835 845 self.__getitem__ = self._rustmap.__getitem__
836 846 self.get = self._rustmap.get
837 847 return self._rustmap
838 848
839 849 def write(self, tr, st, now):
840 850 if not self._use_dirstate_v2:
841 851 p1, p2 = self.parents()
842 852 packed = self._rustmap.write_v1(p1, p2, now)
843 853 st.write(packed)
844 854 st.close()
845 855 self._dirtyparents = False
846 856 return
847 857
848 858 # We can only append to an existing data file if there is one
849 859 can_append = self.docket.uuid is not None
850 860 packed, meta, append = self._rustmap.write_v2(now, can_append)
851 861 if append:
852 862 docket = self.docket
853 863 data_filename = docket.data_filename()
854 864 if tr:
855 865 tr.add(data_filename, docket.data_size)
856 866 with self._opener(data_filename, b'r+b') as fp:
857 867 fp.seek(docket.data_size)
858 868 assert fp.tell() == docket.data_size
859 869 written = fp.write(packed)
860 870 if written is not None: # py2 may return None
861 871 assert written == len(packed), (written, len(packed))
862 872 docket.data_size += len(packed)
863 873 docket.parents = self.parents()
864 874 docket.tree_metadata = meta
865 875 st.write(docket.serialize())
866 876 st.close()
867 877 else:
868 878 old_docket = self.docket
869 879 new_docket = docketmod.DirstateDocket.with_new_uuid(
870 880 self.parents(), len(packed), meta
871 881 )
872 882 data_filename = new_docket.data_filename()
873 883 if tr:
874 884 tr.add(data_filename, 0)
875 885 self._opener.write(data_filename, packed)
876 886 # Write the new docket after the new data file has been
877 887 # written. Because `st` was opened with `atomictemp=True`,
878 888 # the actual `.hg/dirstate` file is only affected on close.
879 889 st.write(new_docket.serialize())
880 890 st.close()
881 891 # Remove the old data file after the new docket pointing to
882 892 # the new data file was written.
883 893 if old_docket.uuid:
884 894 data_filename = old_docket.data_filename()
885 895 unlink = lambda _tr=None: self._opener.unlink(data_filename)
886 896 if tr:
887 897 category = b"dirstate-v2-clean-" + old_docket.uuid
888 898 tr.addpostclose(category, unlink)
889 899 else:
890 900 unlink()
891 901 self._docket = new_docket
892 902 # Reload from the newly-written file
893 903 util.clearcachedproperty(self, b"_rustmap")
894 904 self._dirtyparents = False
895 905
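
The v2 write path above implements a docket-plus-data scheme: a small docket file points at a data file, appends grow the data file in place and only bump `data_size` in the docket, while full rewrites create a fresh data file under a new uuid before the docket is swapped and the old file unlinked. A simplified sketch of the append branch only, with `opener`, `docket`, and `packed` as in the code above:

    def append_packed(opener, docket, packed):
        with opener(docket.data_filename(), b'r+b') as fp:
            fp.seek(docket.data_size)  # append after existing data
            fp.write(packed)
        docket.data_size += len(packed)
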
896 906 @propertycache
897 907 def filefoldmap(self):
898 908 """Returns a dictionary mapping normalized case paths to their
899 909 non-normalized versions.
900 910 """
901 911 return self._rustmap.filefoldmapasdict()
902 912
903 913 def hastrackeddir(self, d):
904 914 return self._rustmap.hastrackeddir(d)
905 915
906 916 def hasdir(self, d):
907 917 return self._rustmap.hasdir(d)
908 918
909 919 @propertycache
910 920 def identity(self):
911 921 self._rustmap
912 922 return self.identity
913 923
914 924 @property
915 925 def nonnormalset(self):
916 926 nonnorm = self._rustmap.non_normal_entries()
917 927 return nonnorm
918 928
919 929 @propertycache
920 930 def otherparentset(self):
921 931 otherparents = self._rustmap.other_parent_entries()
922 932 return otherparents
923 933
924 934 def non_normal_or_other_parent_paths(self):
925 935 return self._rustmap.non_normal_or_other_parent_paths()
926 936
927 937 @propertycache
928 938 def dirfoldmap(self):
929 939 f = {}
930 940 normcase = util.normcase
931 941 for name in self._rustmap.tracked_dirs():
932 942 f[normcase(name)] = name
933 943 return f
934 944
935 945 def set_possibly_dirty(self, filename):
936 946 """record that the current state of the file on disk is unknown"""
937 947 entry = self[filename]
938 948 entry.set_possibly_dirty()
939 949 self._rustmap.set_v1(filename, entry)
940 950
941 951 def set_clean(self, filename, mode, size, mtime):
942 952 """mark a file as back to a clean state"""
943 953 entry = self[filename]
944 954 mtime = mtime & rangemask
945 955 size = size & rangemask
946 956 entry.set_clean(mode, size, mtime)
947 957 self._rustmap.set_v1(filename, entry)
948 958 self._rustmap.copymap().pop(filename, None)
949 959
950 960 def __setitem__(self, key, value):
951 961 assert isinstance(value, DirstateItem)
952 962 self._rustmap.set_v1(key, value)
@@ -1,676 +1,675 b''
1 1 // dirstate_map.rs
2 2 //
3 3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 4 //
5 5 // This software may be used and distributed according to the terms of the
6 6 // GNU General Public License version 2 or any later version.
7 7
8 8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
9 9 //! `hg-core` package.
10 10
11 11 use std::cell::{RefCell, RefMut};
12 12 use std::convert::TryInto;
13 13
14 14 use cpython::{
15 15 exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
16 16 PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
17 17 UnsafePyLeaked,
18 18 };
19 19
20 20 use crate::{
21 21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
22 22 dirstate::make_dirstate_item,
23 dirstate::make_dirstate_item_raw,
24 23 dirstate::non_normal_entries::{
25 24 NonNormalEntries, NonNormalEntriesIterator,
26 25 },
27 26 pybytes_deref::PyBytesDeref,
28 27 };
29 28 use hg::{
30 29 dirstate::parsers::Timestamp,
31 30 dirstate::MTIME_UNSET,
32 31 dirstate::SIZE_NON_NORMAL,
33 32 dirstate_tree::dirstate_map::DirstateMap as TreeDirstateMap,
34 33 dirstate_tree::dispatch::DirstateMapMethods,
35 34 dirstate_tree::on_disk::DirstateV2ParseError,
36 35 dirstate_tree::owning::OwningDirstateMap,
37 36 revlog::Node,
38 37 utils::files::normalize_case,
39 38 utils::hg_path::{HgPath, HgPathBuf},
40 39 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
41 40 DirstateParents, EntryState, StateMapIter,
42 41 };
43 42
44 43 // TODO
45 44 // This object needs to share references to multiple members of its Rust
46 45 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
47 46 // Right now `CopyMap` is done, but it needs to have an explicit reference
48 47 // to `RustDirstateMap` which itself needs to have an encapsulation for
49 48 // every method in `CopyMap` (copymapcopy, etc.).
50 49 // This is ugly and hard to maintain.
51 50 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
52 51 // `py_class!` is already implemented and does not mention
53 52 // `RustDirstateMap`, rightfully so.
54 53 // All attributes also have to have a separate refcount data attribute for
55 54 // leaks, with all methods that go along for reference sharing.
56 55 py_class!(pub class DirstateMap |py| {
57 56 @shared data inner: Box<dyn DirstateMapMethods + Send>;
58 57
59 58 /// Returns a `(dirstate_map, parents)` tuple
60 59 @staticmethod
61 60 def new_v1(
62 61 use_dirstate_tree: bool,
63 62 on_disk: PyBytes,
64 63 ) -> PyResult<PyObject> {
65 64 let (inner, parents) = if use_dirstate_tree {
66 65 let on_disk = PyBytesDeref::new(py, on_disk);
67 66 let mut map = OwningDirstateMap::new_empty(on_disk);
68 67 let (on_disk, map_placeholder) = map.get_mut_pair();
69 68
70 69 let (actual_map, parents) = TreeDirstateMap::new_v1(on_disk)
71 70 .map_err(|e| dirstate_error(py, e))?;
72 71 *map_placeholder = actual_map;
73 72 (Box::new(map) as _, parents)
74 73 } else {
75 74 let bytes = on_disk.data(py);
76 75 let mut map = RustDirstateMap::default();
77 76 let parents = map.read(bytes).map_err(|e| dirstate_error(py, e))?;
78 77 (Box::new(map) as _, parents)
79 78 };
80 79 let map = Self::create_instance(py, inner)?;
81 80 let parents = parents.map(|p| {
82 81 let p1 = PyBytes::new(py, p.p1.as_bytes());
83 82 let p2 = PyBytes::new(py, p.p2.as_bytes());
84 83 (p1, p2)
85 84 });
86 85 Ok((map, parents).to_py_object(py).into_object())
87 86 }
88 87
89 88 /// Returns a DirstateMap
90 89 @staticmethod
91 90 def new_v2(
92 91 on_disk: PyBytes,
93 92 data_size: usize,
94 93 tree_metadata: PyBytes,
95 94 ) -> PyResult<PyObject> {
96 95 let dirstate_error = |e: DirstateError| {
97 96 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
98 97 };
99 98 let on_disk = PyBytesDeref::new(py, on_disk);
100 99 let mut map = OwningDirstateMap::new_empty(on_disk);
101 100 let (on_disk, map_placeholder) = map.get_mut_pair();
102 101 *map_placeholder = TreeDirstateMap::new_v2(
103 102 on_disk, data_size, tree_metadata.data(py),
104 103 ).map_err(dirstate_error)?;
105 104 let map = Self::create_instance(py, Box::new(map))?;
106 105 Ok(map.into_object())
107 106 }
108 107
109 108 def clear(&self) -> PyResult<PyObject> {
110 109 self.inner(py).borrow_mut().clear();
111 110 Ok(py.None())
112 111 }
113 112
114 113 def get(
115 114 &self,
116 115 key: PyObject,
117 116 default: Option<PyObject> = None
118 117 ) -> PyResult<Option<PyObject>> {
119 118 let key = key.extract::<PyBytes>(py)?;
120 119 match self
121 120 .inner(py)
122 121 .borrow()
123 122 .get(HgPath::new(key.data(py)))
124 123 .map_err(|e| v2_error(py, e))?
125 124 {
126 125 Some(entry) => {
127 126 Ok(Some(make_dirstate_item(py, &entry)?))
128 127 },
129 128 None => Ok(default)
130 129 }
131 130 }
132 131
133 132 def set_v1(&self, path: PyObject, item: PyObject) -> PyResult<PyObject> {
134 133 let f = path.extract::<PyBytes>(py)?;
135 134 let filename = HgPath::new(f.data(py));
136 135 let state = item.getattr(py, "state")?.extract::<PyBytes>(py)?;
137 136 let state = state.data(py)[0];
138 137 let entry = DirstateEntry::from_v1_data(
139 138 state.try_into().expect("state is always valid"),
140 139 item.getattr(py, "mode")?.extract(py)?,
141 140 item.getattr(py, "size")?.extract(py)?,
142 141 item.getattr(py, "mtime")?.extract(py)?,
143 142 );
144 143 self.inner(py).borrow_mut().set_v1(filename, entry);
145 144 Ok(py.None())
146 145 }
147 146
148 147 def addfile(
149 148 &self,
150 149 f: PyObject,
151 150 mode: PyObject,
152 151 size: PyObject,
153 152 mtime: PyObject,
154 153 added: PyObject,
155 154 merged: PyObject,
156 155 from_p2: PyObject,
157 156 possibly_dirty: PyObject,
158 157 ) -> PyResult<PyObject> {
159 158 let f = f.extract::<PyBytes>(py)?;
160 159 let filename = HgPath::new(f.data(py));
161 160 let mode = if mode.is_none(py) {
162 161 // fallback default value
163 162 0
164 163 } else {
165 164 mode.extract(py)?
166 165 };
167 166 let size = if size.is_none(py) {
168 167 // fallback default value
169 168 SIZE_NON_NORMAL
170 169 } else {
171 170 size.extract(py)?
172 171 };
173 172 let mtime = if mtime.is_none(py) {
174 173 // fallback default value
175 174 MTIME_UNSET
176 175 } else {
177 176 mtime.extract(py)?
178 177 };
179 178 let entry = DirstateEntry::new_for_add_file(mode, size, mtime);
180 179 let added = added.extract::<PyBool>(py)?.is_true();
181 180 let merged = merged.extract::<PyBool>(py)?.is_true();
182 181 let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
183 182 let possibly_dirty = possibly_dirty.extract::<PyBool>(py)?.is_true();
184 183 self.inner(py).borrow_mut().add_file(
185 184 filename,
186 185 entry,
187 186 added,
188 187 merged,
189 188 from_p2,
190 189 possibly_dirty
191 190 ).and(Ok(py.None())).or_else(|e: DirstateError| {
192 191 Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
193 192 })
194 193 }
195 194
196 195 def removefile(
197 196 &self,
198 197 f: PyObject,
199 198 in_merge: PyObject
200 199 ) -> PyResult<PyObject> {
201 200 self.inner(py).borrow_mut()
202 201 .remove_file(
203 202 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
204 203 in_merge.extract::<PyBool>(py)?.is_true(),
205 204 )
206 205 .or_else(|_| {
207 206 Err(PyErr::new::<exc::OSError, _>(
208 207 py,
209 208 "Dirstate error".to_string(),
210 209 ))
211 210 })?;
212 211 Ok(py.None())
213 212 }
214 213
215 214 def dropfile(
216 215 &self,
217 216 f: PyObject,
218 217 ) -> PyResult<PyBool> {
219 218 self.inner(py).borrow_mut()
220 219 .drop_file(
221 220 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
222 221 )
223 222 .and_then(|b| Ok(b.to_py_object(py)))
224 223 .or_else(|e| {
225 224 Err(PyErr::new::<exc::OSError, _>(
226 225 py,
227 226 format!("Dirstate error: {}", e.to_string()),
228 227 ))
229 228 })
230 229 }
231 230
232 231 def clearambiguoustimes(
233 232 &self,
234 233 files: PyObject,
235 234 now: PyObject
236 235 ) -> PyResult<PyObject> {
237 236 let files: PyResult<Vec<HgPathBuf>> = files
238 237 .iter(py)?
239 238 .map(|filename| {
240 239 Ok(HgPathBuf::from_bytes(
241 240 filename?.extract::<PyBytes>(py)?.data(py),
242 241 ))
243 242 })
244 243 .collect();
245 244 self.inner(py)
246 245 .borrow_mut()
247 246 .clear_ambiguous_times(files?, now.extract(py)?)
248 247 .map_err(|e| v2_error(py, e))?;
249 248 Ok(py.None())
250 249 }
251 250
252 251 def other_parent_entries(&self) -> PyResult<PyObject> {
253 252 let mut inner_shared = self.inner(py).borrow_mut();
254 253 let set = PySet::empty(py)?;
255 254 for path in inner_shared.iter_other_parent_paths() {
256 255 let path = path.map_err(|e| v2_error(py, e))?;
257 256 set.add(py, PyBytes::new(py, path.as_bytes()))?;
258 257 }
259 258 Ok(set.into_object())
260 259 }
261 260
262 261 def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
263 262 NonNormalEntries::from_inner(py, self.clone_ref(py))
264 263 }
265 264
266 265 def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
267 266 let key = key.extract::<PyBytes>(py)?;
268 267 self.inner(py)
269 268 .borrow_mut()
270 269 .non_normal_entries_contains(HgPath::new(key.data(py)))
271 270 .map_err(|e| v2_error(py, e))
272 271 }
273 272
274 273 def non_normal_entries_display(&self) -> PyResult<PyString> {
275 274 let mut inner = self.inner(py).borrow_mut();
276 275 let paths = inner
277 276 .iter_non_normal_paths()
278 277 .collect::<Result<Vec<_>, _>>()
279 278 .map_err(|e| v2_error(py, e))?;
280 279 let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
281 280 Ok(PyString::new(py, &formatted))
282 281 }
283 282
284 283 def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
285 284 let key = key.extract::<PyBytes>(py)?;
286 285 let key = key.data(py);
287 286 let was_present = self
288 287 .inner(py)
289 288 .borrow_mut()
290 289 .non_normal_entries_remove(HgPath::new(key));
291 290 if !was_present {
292 291 let msg = String::from_utf8_lossy(key);
293 292 Err(PyErr::new::<exc::KeyError, _>(py, msg))
294 293 } else {
295 294 Ok(py.None())
296 295 }
297 296 }
298 297
299 298 def non_normal_entries_discard(&self, key: PyObject) -> PyResult<PyObject>
300 299 {
301 300 let key = key.extract::<PyBytes>(py)?;
302 301 self
303 302 .inner(py)
304 303 .borrow_mut()
305 304 .non_normal_entries_remove(HgPath::new(key.data(py)));
306 305 Ok(py.None())
307 306 }
308 307
309 308 def non_normal_entries_add(&self, key: PyObject) -> PyResult<PyObject> {
310 309 let key = key.extract::<PyBytes>(py)?;
311 310 self
312 311 .inner(py)
313 312 .borrow_mut()
314 313 .non_normal_entries_add(HgPath::new(key.data(py)));
315 314 Ok(py.None())
316 315 }
317 316
318 317 def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
319 318 let mut inner = self.inner(py).borrow_mut();
320 319
321 320 let ret = PyList::new(py, &[]);
322 321 for filename in inner.non_normal_or_other_parent_paths() {
323 322 let filename = filename.map_err(|e| v2_error(py, e))?;
324 323 let as_pystring = PyBytes::new(py, filename.as_bytes());
325 324 ret.append(py, as_pystring.into_object());
326 325 }
327 326 Ok(ret)
328 327 }
329 328
330 329 def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
331 330 // Make sure the sets are defined before we no longer have a mutable
332 331 // reference to the dmap.
333 332 self.inner(py)
334 333 .borrow_mut()
335 334 .set_non_normal_other_parent_entries(false);
336 335
337 336 let leaked_ref = self.inner(py).leak_immutable();
338 337
339 338 NonNormalEntriesIterator::from_inner(py, unsafe {
340 339 leaked_ref.map(py, |o| {
341 340 o.iter_non_normal_paths_panic()
342 341 })
343 342 })
344 343 }
345 344
346 345 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
347 346 let d = d.extract::<PyBytes>(py)?;
348 347 Ok(self.inner(py).borrow_mut()
349 348 .has_tracked_dir(HgPath::new(d.data(py)))
350 349 .map_err(|e| {
351 350 PyErr::new::<exc::ValueError, _>(py, e.to_string())
352 351 })?
353 352 .to_py_object(py))
354 353 }
355 354
356 355 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
357 356 let d = d.extract::<PyBytes>(py)?;
358 357 Ok(self.inner(py).borrow_mut()
359 358 .has_dir(HgPath::new(d.data(py)))
360 359 .map_err(|e| {
361 360 PyErr::new::<exc::ValueError, _>(py, e.to_string())
362 361 })?
363 362 .to_py_object(py))
364 363 }
365 364
366 365 def write_v1(
367 366 &self,
368 367 p1: PyObject,
369 368 p2: PyObject,
370 369 now: PyObject
371 370 ) -> PyResult<PyBytes> {
372 371 let now = Timestamp(now.extract(py)?);
373 372
374 373 let mut inner = self.inner(py).borrow_mut();
375 374 let parents = DirstateParents {
376 375 p1: extract_node_id(py, &p1)?,
377 376 p2: extract_node_id(py, &p2)?,
378 377 };
379 378 let result = inner.pack_v1(parents, now);
380 379 match result {
381 380 Ok(packed) => Ok(PyBytes::new(py, &packed)),
382 381 Err(_) => Err(PyErr::new::<exc::OSError, _>(
383 382 py,
384 383 "Dirstate error".to_string(),
385 384 )),
386 385 }
387 386 }
388 387
389 388 /// Returns new data together with whether that data should be appended to
390 389 /// the existing data file whose content is at `self.on_disk` (True),
391 390 /// instead of written to a new data file (False).
392 391 def write_v2(
393 392 &self,
394 393 now: PyObject,
395 394 can_append: bool,
396 395 ) -> PyResult<PyObject> {
397 396 let now = Timestamp(now.extract(py)?);
398 397
399 398 let mut inner = self.inner(py).borrow_mut();
400 399 let result = inner.pack_v2(now, can_append);
401 400 match result {
402 401 Ok((packed, tree_metadata, append)) => {
403 402 let packed = PyBytes::new(py, &packed);
404 403 let tree_metadata = PyBytes::new(py, &tree_metadata);
405 404 let tuple = (packed, tree_metadata, append);
406 405 Ok(tuple.to_py_object(py).into_object())
407 406 },
408 407 Err(_) => Err(PyErr::new::<exc::OSError, _>(
409 408 py,
410 409 "Dirstate error".to_string(),
411 410 )),
412 411 }
413 412 }
414 413
415 414 def filefoldmapasdict(&self) -> PyResult<PyDict> {
416 415 let dict = PyDict::new(py);
417 416 for item in self.inner(py).borrow_mut().iter() {
418 417 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
419 418 if entry.state() != EntryState::Removed {
420 419 let key = normalize_case(path);
421 420 let value = path;
422 421 dict.set_item(
423 422 py,
424 423 PyBytes::new(py, key.as_bytes()).into_object(),
425 424 PyBytes::new(py, value.as_bytes()).into_object(),
426 425 )?;
427 426 }
428 427 }
429 428 Ok(dict)
430 429 }
431 430
432 431 def __len__(&self) -> PyResult<usize> {
433 432 Ok(self.inner(py).borrow().len())
434 433 }
435 434
436 435 def __contains__(&self, key: PyObject) -> PyResult<bool> {
437 436 let key = key.extract::<PyBytes>(py)?;
438 437 self.inner(py)
439 438 .borrow()
440 439 .contains_key(HgPath::new(key.data(py)))
441 440 .map_err(|e| v2_error(py, e))
442 441 }
443 442
444 443 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
445 444 let key = key.extract::<PyBytes>(py)?;
446 445 let key = HgPath::new(key.data(py));
447 446 match self
448 447 .inner(py)
449 448 .borrow()
450 449 .get(key)
451 450 .map_err(|e| v2_error(py, e))?
452 451 {
453 452 Some(entry) => {
454 453 Ok(make_dirstate_item(py, &entry)?)
455 454 },
456 455 None => Err(PyErr::new::<exc::KeyError, _>(
457 456 py,
458 457 String::from_utf8_lossy(key.as_bytes()),
459 458 )),
460 459 }
461 460 }
462 461
463 462 def keys(&self) -> PyResult<DirstateMapKeysIterator> {
464 463 let leaked_ref = self.inner(py).leak_immutable();
465 464 DirstateMapKeysIterator::from_inner(
466 465 py,
467 466 unsafe { leaked_ref.map(py, |o| o.iter()) },
468 467 )
469 468 }
470 469
471 470 def items(&self) -> PyResult<DirstateMapItemsIterator> {
472 471 let leaked_ref = self.inner(py).leak_immutable();
473 472 DirstateMapItemsIterator::from_inner(
474 473 py,
475 474 unsafe { leaked_ref.map(py, |o| o.iter()) },
476 475 )
477 476 }
478 477
479 478 def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
480 479 let leaked_ref = self.inner(py).leak_immutable();
481 480 DirstateMapKeysIterator::from_inner(
482 481 py,
483 482 unsafe { leaked_ref.map(py, |o| o.iter()) },
484 483 )
485 484 }
486 485
487 486 // TODO all copymap* methods, see docstring above
488 487 def copymapcopy(&self) -> PyResult<PyDict> {
489 488 let dict = PyDict::new(py);
490 489 for item in self.inner(py).borrow().copy_map_iter() {
491 490 let (key, value) = item.map_err(|e| v2_error(py, e))?;
492 491 dict.set_item(
493 492 py,
494 493 PyBytes::new(py, key.as_bytes()),
495 494 PyBytes::new(py, value.as_bytes()),
496 495 )?;
497 496 }
498 497 Ok(dict)
499 498 }
500 499
501 500 def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
502 501 let key = key.extract::<PyBytes>(py)?;
503 502 match self
504 503 .inner(py)
505 504 .borrow()
506 505 .copy_map_get(HgPath::new(key.data(py)))
507 506 .map_err(|e| v2_error(py, e))?
508 507 {
509 508 Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
510 509 None => Err(PyErr::new::<exc::KeyError, _>(
511 510 py,
512 511 String::from_utf8_lossy(key.data(py)),
513 512 )),
514 513 }
515 514 }
516 515 def copymap(&self) -> PyResult<CopyMap> {
517 516 CopyMap::from_inner(py, self.clone_ref(py))
518 517 }
519 518
520 519 def copymaplen(&self) -> PyResult<usize> {
521 520 Ok(self.inner(py).borrow().copy_map_len())
522 521 }
523 522 def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
524 523 let key = key.extract::<PyBytes>(py)?;
525 524 self.inner(py)
526 525 .borrow()
527 526 .copy_map_contains_key(HgPath::new(key.data(py)))
528 527 .map_err(|e| v2_error(py, e))
529 528 }
530 529 def copymapget(
531 530 &self,
532 531 key: PyObject,
533 532 default: Option<PyObject>
534 533 ) -> PyResult<Option<PyObject>> {
535 534 let key = key.extract::<PyBytes>(py)?;
536 535 match self
537 536 .inner(py)
538 537 .borrow()
539 538 .copy_map_get(HgPath::new(key.data(py)))
540 539 .map_err(|e| v2_error(py, e))?
541 540 {
542 541 Some(copy) => Ok(Some(
543 542 PyBytes::new(py, copy.as_bytes()).into_object(),
544 543 )),
545 544 None => Ok(default),
546 545 }
547 546 }
548 547 def copymapsetitem(
549 548 &self,
550 549 key: PyObject,
551 550 value: PyObject
552 551 ) -> PyResult<PyObject> {
553 552 let key = key.extract::<PyBytes>(py)?;
554 553 let value = value.extract::<PyBytes>(py)?;
555 554 self.inner(py)
556 555 .borrow_mut()
557 556 .copy_map_insert(
558 557 HgPathBuf::from_bytes(key.data(py)),
559 558 HgPathBuf::from_bytes(value.data(py)),
560 559 )
561 560 .map_err(|e| v2_error(py, e))?;
562 561 Ok(py.None())
563 562 }
564 563 def copymappop(
565 564 &self,
566 565 key: PyObject,
567 566 default: Option<PyObject>
568 567 ) -> PyResult<Option<PyObject>> {
569 568 let key = key.extract::<PyBytes>(py)?;
570 569 match self
571 570 .inner(py)
572 571 .borrow_mut()
573 572 .copy_map_remove(HgPath::new(key.data(py)))
574 573 .map_err(|e| v2_error(py, e))?
575 574 {
576 575 Some(_) => Ok(None),
577 576 None => Ok(default),
578 577 }
579 578 }
580 579
581 580 def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
582 581 let leaked_ref = self.inner(py).leak_immutable();
583 582 CopyMapKeysIterator::from_inner(
584 583 py,
585 584 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
586 585 )
587 586 }
588 587
589 588 def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
590 589 let leaked_ref = self.inner(py).leak_immutable();
591 590 CopyMapItemsIterator::from_inner(
592 591 py,
593 592 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
594 593 )
595 594 }
596 595
597 596 def tracked_dirs(&self) -> PyResult<PyList> {
598 597 let dirs = PyList::new(py, &[]);
599 598 for path in self.inner(py).borrow_mut().iter_tracked_dirs()
600 599 .map_err(|e |dirstate_error(py, e))?
601 600 {
602 601 let path = path.map_err(|e| v2_error(py, e))?;
603 602 let path = PyBytes::new(py, path.as_bytes());
604 603 dirs.append(py, path.into_object())
605 604 }
606 605 Ok(dirs)
607 606 }
608 607
609 608 def debug_iter(&self, all: bool) -> PyResult<PyList> {
610 609 let dirs = PyList::new(py, &[]);
611 610 for item in self.inner(py).borrow().debug_iter(all) {
612 611 let (path, (state, mode, size, mtime)) =
613 612 item.map_err(|e| v2_error(py, e))?;
614 613 let path = PyBytes::new(py, path.as_bytes());
615 let item = make_dirstate_item_raw(py, state, mode, size, mtime)?;
616 dirs.append(py, (path, item).to_py_object(py).into_object())
614 let item = (path, state, mode, size, mtime);
615 dirs.append(py, item.to_py_object(py).into_object())
617 616 }
618 617 Ok(dirs)
619 618 }
620 619 });
621 620
622 621 impl DirstateMap {
623 622 pub fn get_inner_mut<'a>(
624 623 &'a self,
625 624 py: Python<'a>,
626 625 ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
627 626 self.inner(py).borrow_mut()
628 627 }
629 628 fn translate_key(
630 629 py: Python,
631 630 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
632 631 ) -> PyResult<Option<PyBytes>> {
633 632 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
634 633 Ok(Some(PyBytes::new(py, f.as_bytes())))
635 634 }
636 635 fn translate_key_value(
637 636 py: Python,
638 637 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
639 638 ) -> PyResult<Option<(PyBytes, PyObject)>> {
640 639 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
641 640 Ok(Some((
642 641 PyBytes::new(py, f.as_bytes()),
643 642 make_dirstate_item(py, &entry)?,
644 643 )))
645 644 }
646 645 }
647 646
648 647 py_shared_iterator!(
649 648 DirstateMapKeysIterator,
650 649 UnsafePyLeaked<StateMapIter<'static>>,
651 650 DirstateMap::translate_key,
652 651 Option<PyBytes>
653 652 );
654 653
655 654 py_shared_iterator!(
656 655 DirstateMapItemsIterator,
657 656 UnsafePyLeaked<StateMapIter<'static>>,
658 657 DirstateMap::translate_key_value,
659 658 Option<(PyBytes, PyObject)>
660 659 );
661 660
662 661 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
663 662 let bytes = obj.extract::<PyBytes>(py)?;
664 663 match bytes.data(py).try_into() {
665 664 Ok(s) => Ok(s),
666 665 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
667 666 }
668 667 }
669 668
670 669 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
671 670 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
672 671 }
673 672
674 673 fn dirstate_error(py: Python<'_>, e: DirstateError) -> PyErr {
675 674 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
676 675 }