##// END OF EJS Templates
debugdeltachain: distinguish between snapshot and "other" diffs...
marmoute -
r50113:5b1495c3 default
parent child Browse files
Show More
@@ -1,4930 +1,4932 b''
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import binascii
10 10 import codecs
11 11 import collections
12 12 import contextlib
13 13 import difflib
14 14 import errno
15 15 import glob
16 16 import operator
17 17 import os
18 18 import platform
19 19 import random
20 20 import re
21 21 import socket
22 22 import ssl
23 23 import stat
24 24 import string
25 25 import subprocess
26 26 import sys
27 27 import time
28 28
29 29 from .i18n import _
30 30 from .node import (
31 31 bin,
32 32 hex,
33 33 nullrev,
34 34 short,
35 35 )
36 36 from .pycompat import (
37 37 getattr,
38 38 open,
39 39 )
40 40 from . import (
41 41 bundle2,
42 42 bundlerepo,
43 43 changegroup,
44 44 cmdutil,
45 45 color,
46 46 context,
47 47 copies,
48 48 dagparser,
49 49 dirstateutils,
50 50 encoding,
51 51 error,
52 52 exchange,
53 53 extensions,
54 54 filemerge,
55 55 filesetlang,
56 56 formatter,
57 57 hg,
58 58 httppeer,
59 59 localrepo,
60 60 lock as lockmod,
61 61 logcmdutil,
62 62 mergestate as mergestatemod,
63 63 metadata,
64 64 obsolete,
65 65 obsutil,
66 66 pathutil,
67 67 phases,
68 68 policy,
69 69 pvec,
70 70 pycompat,
71 71 registrar,
72 72 repair,
73 73 repoview,
74 74 requirements,
75 75 revlog,
76 76 revset,
77 77 revsetlang,
78 78 scmutil,
79 79 setdiscovery,
80 80 simplemerge,
81 81 sshpeer,
82 82 sslutil,
83 83 streamclone,
84 84 strip,
85 85 tags as tagsmod,
86 86 templater,
87 87 treediscovery,
88 88 upgrade,
89 89 url as urlmod,
90 90 util,
91 91 vfs as vfsmod,
92 92 wireprotoframing,
93 93 wireprotoserver,
94 94 )
95 95 from .interfaces import repository
96 96 from .utils import (
97 97 cborutil,
98 98 compression,
99 99 dateutil,
100 100 procutil,
101 101 stringutil,
102 102 urlutil,
103 103 )
104 104
105 105 from .revlogutils import (
106 106 deltas as deltautil,
107 107 nodemap,
108 108 rewrite,
109 109 sidedata,
110 110 )
111 111
112 112 release = lockmod.release
113 113
114 114 table = {}
115 115 table.update(strip.command._table)
116 116 command = registrar.command(table)
117 117
118 118
@command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    argcount = len(args)
    if argcount == 3:
        # An explicit index file was given: open it as a standalone revlog.
        index, rev1, rev2 = args
        store = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = store.lookup
    elif argcount == 2:
        # No index file: fall back to the changelog of the current repo.
        if not repo:
            raise error.Abort(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        rev1, rev2 = args
        store = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_(b'either two or three arguments required'))
    ancestor = store.ancestor(lookup(rev1), lookup(rev2))
    ui.write(b'%d:%s\n' % (store.rev(ancestor), hex(ancestor)))
138 138
139 139
@command(b'debugantivirusrunning', [])
def debugantivirusrunning(ui, repo):
    """attempt to trigger an antivirus scanner to see if one is active"""
    # This is a base85-armored version of the EICAR test file. See
    # https://en.wikipedia.org/wiki/EICAR_test_file for details.
    eicar = util.b85decode(
        b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
        b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
    )
    with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
        f.write(eicar)
    # Give an AV engine time to scan the file.
    time.sleep(2)
    util.unlink(repo.cachevfs.join('eicar-test-file.com'))
155 155
156 156
@command(b'debugapplystreamclonebundle', [], b'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file

    Opens FILE, reads it as a bundle and applies it to the current
    repository.
    """
    # Close the handle even if application fails; the sibling debugbundle
    # command already manages its file this way, while this one leaked it.
    f = hg.openpath(ui, fname)
    try:
        gen = exchange.readbundle(ui, f, fname)
        gen.apply(repo)
    finally:
        f.close()
163 163
164 164
@command(
    b'debugbuilddag',
    [
        (
            b'm',
            b'mergeable-file',
            None,
            _(b'add single file mergeable changes'),
        ),
        (
            b'o',
            b'overwritten-file',
            None,
            _(b'add single file all revs overwrite'),
        ),
        (b'n', b'new-file', None, _(b'add new file at each rev')),
        (
            b'',
            b'from-existing',
            None,
            _(b'continue from a non-empty repository'),
        ),
    ],
    _(b'[OPTION]... [TEXT]'),
)
def debugbuilddag(
    ui,
    repo,
    text=None,
    mergeable_file=False,
    overwritten_file=False,
    new_file=False,
    from_existing=False,
):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_(b"reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0 and not from_existing:
        raise error.Abort(_(b'repository is not empty'))

    # First pass over the DAG text: count node elements so the progress
    # bar (and the mergeable-file line count) can be sized up front.
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == b'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [
            b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
        ]
        initialmergedlines.append(b"")

    tags = []
    progress = ui.makeprogress(
        _(b'building'), unit=_(b'revisions'), total=total
    )
    # Second pass: actually commit one changeset per node element, all
    # inside a single transaction.
    with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
        at = -1  # id of the most recently committed node, -1 before any
        atbranch = b'default'
        nodeids = []  # nodeids[i] is the node committed for DAG id i
        id = 0
        progress.update(id)
        for type, data in dagparser.parsedag(text):
            if type == b'n':
                ui.note((b'node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = b"mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # merge node: three-way merge the "mf" file of
                        # both parents against their common ancestor
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [
                            x[fn].data() for x in (pa, p1, p2)
                        ]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [
                            l.strip()
                            for l in simplemerge.render_minimized(m3)[0]
                        ]
                        ml.append(b"")
                    elif at > 0:
                        ml = p1[fn].data().split(b"\n")
                    else:
                        ml = initialmergedlines
                    # tag this revision's own line so every rev changes "mf"
                    ml[id * linesperrev] += b" r%i" % id
                    mergedtext = b"\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    # "of" is rewritten wholesale at every revision
                    fn = b"of"
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id

                if new_file:
                    # one brand-new file per revision, named after its id
                    fn = b"nf%i" % id
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        # carry over the second parent's nf* files so the
                        # merge keeps them
                        for fn in p2:
                            if fn.startswith(b"nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(
                            repo, cx, path, filecontent[path]
                        )
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(
                    repo,
                    pars,
                    b"r%i" % id,
                    files,
                    fctxfn,
                    date=(id, 0),
                    user=b"debugbuilddag",
                    extra={b'branch': atbranch},
                )
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == b'l':
                # local tag element: recorded and written out at the end
                id, name = data
                ui.note((b'tag %s\n' % name))
                tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == b'a':
                # branch annotation: applies to subsequent nodes
                ui.note((b'branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write(b"localtags", b"".join(tags))
350 350
351 351
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """Print the contents of changegroup ``gen``, indented by ``indent``.

    With ``all`` set, every delta chunk of the changelog, manifest and
    each filelog is printed with its full metadata; otherwise only the
    changelog node hashes are printed.
    """
    indent_string = b' ' * indent
    if all:
        ui.writenoi18n(
            b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
            % indent_string
        )

        def showchunks(named):
            # one labeled section per store (changelog/manifest/filelog)
            ui.write(b"\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
                ui.write(
                    b"%s%s %s %s %s %s %d\n"
                    % (
                        indent_string,
                        hex(node),
                        hex(p1),
                        hex(p2),
                        hex(cs),
                        hex(deltabase),
                        len(delta),
                    )
                )

        gen.changelogheader()
        showchunks(b"changelog")
        gen.manifestheader()
        showchunks(b"manifest")
        # filelogheader() returns {} after the last filelog; iterate
        # until that sentinel is seen
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata[b'filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_(b'use debugbundle2 for this file'))
        gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
            ui.write(b"%s%s\n" % (indent_string, hex(node)))
391 391
392 392
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    prefix = b' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        ui.write(
            b"%sunsupported version: %s (%d bytes)\n"
            % (prefix, exc.version, len(data))
        )
    else:
        ui.write(b"%sversion: %d (%d bytes)\n" % (prefix, version, len(data)))
        fm = ui.formatter(b'debugobsolete', opts)
        for raw in sorted(markers):
            fm.startitem()
            fm.plain(prefix)
            cmdutil.showmarker(fm, obsutil.marker(None, raw))
        fm.end()
415 415
416 416
def _debugphaseheads(ui, data, indent=0):
    """display the phase heads encoded in 'data', one per line"""
    prefix = b' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        phasename = phases.phasenames[phase]
        for head in headsbyphase[phase]:
            ui.write(prefix)
            ui.write(b'%s %s\n' % (hex(head), phasename))
425 425
426 426
def _quasirepr(thing):
    """Return a deterministic, repr-like bytes rendering of ``thing``.

    Mappings are printed with their keys sorted so output is stable.
    """
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        pairs = (b'%s: %s' % (key, thing[key]) for key in sorted(thing))
        return b'{%s}' % b', '.join(pairs)
    return pycompat.bytestr(repr(thing))
433 433
434 434
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_(b'not a bundle2 file'))
    ui.write(b'Stream params: %s\n' % _quasirepr(gen.params))
    wanted = opts.get('part_type', [])
    for part in gen.iterparts():
        if wanted and part.type not in wanted:
            continue
        ui.write(
            b'%s -- %s (mandatory: %r)\n'
            % (part.type, _quasirepr(part.params), part.mandatory)
        )
        if part.type == b'changegroup':
            # the unbundler is constructed regardless of quiet mode,
            # matching the pre-existing control flow
            version = part.params.get(b'version', b'01')
            cg = changegroup.getunbundler(version, part, b'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        elif part.type == b'obsmarkers' and not ui.quiet:
            _debugobsmarkers(ui, part, indent=4, **opts)
        elif part.type == b'phase-heads' and not ui.quiet:
            _debugphaseheads(ui, part, indent=4)
457 457
458 458
@command(
    b'debugbundle',
    [
        (b'a', b'all', None, _(b'show all details')),
        (b'', b'part-type', [], _(b'show only the named part type')),
        (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
    ],
    _(b'FILE'),
    norepo=True,
)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            # only report the bundlespec, not the contents
            ui.write(b'%s\n' % exchange.getbundlespec(ui, f))
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
481 481
482 482
@command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    try:
        ui.writenoi18n(b'Main capabilities:\n')
        for cap in sorted(peer.capabilities()):
            ui.write(b' %s\n' % cap)
        b2caps = bundle2.bundle2caps(peer)
        if b2caps:
            ui.writenoi18n(b'Bundle2 capabilities:\n')
            for key, values in sorted(b2caps.items()):
                ui.write(b' %s\n' % key)
                for value in values:
                    ui.write(b' %s\n' % value)
    finally:
        # always release the peer connection, even on error
        peer.close()
502 502
503 503
@command(
    b'debugchangedfiles',
    [
        (
            b'',
            b'compute',
            False,
            b"compute information instead of reading it from storage",
        ),
    ],
    b'REV',
)
def debugchangedfiles(ui, repo, rev, **opts):
    """list the stored files changes for a revision"""
    ctx = logcmdutil.revsingle(repo, rev, None)

    # Obtain the files-change record either by recomputing it from the
    # changeset or by decoding the stored sidedata block.
    files = None
    if opts['compute']:
        files = metadata.compute_all_files_changes(ctx)
    else:
        sd = repo.changelog.sidedata(ctx.rev())
        if sd.get(sidedata.SD_FILES) is not None:
            files = metadata.decode_files_sidedata(sd)

    if files is None:
        return

    template = b"%-8s %2s: %s, %s;\n"
    for path in sorted(files.touched):
        # classify the change; the first matching category wins
        if path in files.added:
            action = b"added"
        elif path in files.removed:
            action = b"removed"
        elif path in files.merged:
            action = b"merged"
        elif path in files.salvaged:
            action = b"salvaged"
        else:
            action = b"touched"

        copy_parent = b""
        copy_source = b""
        if path in files.copied_from_p1:
            copy_parent = b"p1"
            copy_source = files.copied_from_p1[path]
        elif path in files.copied_from_p2:
            copy_parent = b"p2"
            copy_source = files.copied_from_p2[path]

        ui.write(template % (action, copy_parent, path, copy_source))
553 553
554 554
@command(b'debugcheckstate', [], b'')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errcount = 0
    # each err is (format-string, *args) describing one inconsistency
    for err in repo.dirstate.verify(m1, m2):
        ui.warn(err[0] % err[1:])
        errcount += 1
    if errcount:
        raise error.Abort(
            _(b".hg/dirstate inconsistent with current parent's manifest")
        )
568 568
569 569
@command(
    b'debugcolor',
    [(b'', b'style', None, _(b'show all configured styles'))],
    b'hg debugcolor',
)
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
    if opts.get('style'):
        return _debugdisplaystyle(ui)
    return _debugdisplaycolor(ui)
582 582
583 583
def _debugdisplaycolor(ui):
    """Print every known color label, each rendered with its own label."""
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for key, _value in ui.configitems(b'color'):
            if key.startswith(b'color.'):
                ui._styles[key] = key[6:]
            elif key.startswith(b'terminfo.'):
                ui._styles[key] = key[9:]
    ui.write(_(b'available colors:\n'))

    # sort label with a '_' after the other to group '_background' entry.
    def sortkey(item):
        return (b'_' in item[0], item[0], item[1])

    for colorname, label in sorted(ui._styles.items(), key=sortkey):
        ui.write(b'%s\n' % colorname, label=label)
600 600
601 601
def _debugdisplaystyle(ui):
    """Print each configured style label, rendered with its own effects."""
    ui.write(_(b'available style:\n'))
    if not ui._styles:
        return
    width = max(len(name) for name in ui._styles)
    for name, effects in sorted(ui._styles.items()):
        ui.write(b'%s' % name, label=name)
        if effects:
            # pad so the effect lists line up in one column
            padding = b' ' * (max(0, width - len(name)))
            rendered = b', '.join(ui.label(e, e) for e in effects.split())
            ui.write(b': ')
            ui.write(padding)
            ui.write(rendered)
        ui.write(b'\n')
615 615
616 616
@command(b'debugcreatestreamclonebundle', [], b'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(
            _(
                b'(warning: stream clone bundle will contain secret '
                b'revisions)\n'
            )
        )

    # local renamed from `requirements` to avoid shadowing the module import
    reqs, chunks = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, chunks, fname)

    ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(reqs)))
638 638
639 639
@command(
    b'debugdag',
    [
        (b't', b'tags', None, _(b'use tags as labels')),
        (b'b', b'branches', None, _(b'annotate with branch names')),
        (b'', b'dots', None, _(b'use dots for runs')),
        (b's', b'spaces', None, _(b'separate elements by spaces')),
    ],
    _(b'[OPTION]... [FILE [REV]...]'),
    optionalrepo=True,
)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # standalone revlog index mode: label only the revs listed on the
        # command line
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
        revs = {int(r) for r in revs}

        def events():
            # yield ('n', (rev, parents)) for every revision, plus an
            # 'l' (label) event for each requested rev
            for r in rlog:
                yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
                if r in revs:
                    yield b'l', (r, b"r%i" % r)

    elif repo:
        # changelog mode: optionally label with tags and annotate branches
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            # map each tagged revision to its list of tag names
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)

        def events():
            b = b"default"
            for r in cl:
                if branches:
                    # emit an 'a' (annotation) event on branch changes
                    newb = cl.read(cl.node(r))[5][b'branch']
                    if newb != b:
                        yield b'a', newb
                        b = newb
                yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield b'l', (r, l)

    else:
        raise error.Abort(_(b'need repo for changelog dag'))

    for line in dagparser.dagtextlines(
        events(),
        addspaces=spaces,
        wraplabels=True,
        wrapannotations=True,
        wrapnonlinear=dots,
        usedots=dots,
        maxlinewidth=70,
    ):
        ui.write(line)
    ui.write(b"\n")
709 709
710 710
@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    picked_store = (
        opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir')
    )
    if picked_store:
        # with -c/-m/--dir the single positional argument is the revision
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    store = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    try:
        ui.write(store.rawdata(store.lookup(rev)))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
726 726
727 727
@command(
    b'debugdate',
    [(b'e', b'extended', None, _(b'try extended date formats'))],
    _(b'[-e] DATE [RANGE]'),
    norepo=True,
    optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        parsed = dateutil.parsedate(date, dateutil.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    ui.writenoi18n(b"internal: %d %d\n" % parsed)
    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(parsed))
    if range:
        matcher = dateutil.matchdate(range)
        ui.writenoi18n(b"match: %s\n" % matcher(parsed[0]))
746 746
747 747
@command(
    b'debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
                   (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r._generaldelta
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        """Return (compsize, uncompsize, deltatype, chain, chainsize) for rev.

        Index entry fields used: e[1] compressed size, e[2] uncompressed
        size, e[3] delta base revision, e[5]/e[6] parent revisions (as
        established by the comparisons below).
        """
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # classify the delta by its base; a full snapshot ('base'),
            # an intermediate snapshot ('snap') and deltas against p1/p2,
            # the previous rev, or some 'other' revision are distinguished
            if e[3] == e[5]:
                deltatype = b'p1'
            elif e[3] == e[6]:
                deltatype = b'p2'
            elif e[3] == rev:
                deltatype = b'base'
            elif r.issnapshot(rev):
                deltatype = b'snap'
            elif e[3] == rev - 1:
                deltatype = b'prev'
            else:
                deltatype = b'other'
        else:
            # without generaldelta a revision is either stored whole or
            # as a delta against the previous revision
            if e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter(b'debugdeltachain', opts)

    fm.plain(
        b' rev chain# chainlen prev delta '
        b'size rawsize chainsize ratio lindist extradist '
        b'extraratio'
    )
    if withsparseread:
        fm.plain(b' readsize largestblk rddensity srchunks')
    fm.plain(b'\n')

    # map delta-chain base revision -> 1-based chain id
    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        # bytes spanned on disk from the chain base to the end of this rev
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            # chain of length one: no previous revision
            prevrev = -1

        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write(
            b'rev chainid chainlen prevrev deltatype compsize '
            b'uncompsize chainsize chainratio lindist extradist '
            b'extraratio',
            b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
            rev,
            chainid,
            len(chain),
            prevrev,
            deltatype,
            comp,
            uncomp,
            chainsize,
            chainratio,
            lineardist,
            extradist,
            extraratio,
            rev=rev,
            chainid=chainid,
            chainlen=len(chain),
            prevrev=prevrev,
            deltatype=deltatype,
            compsize=comp,
            uncompsize=uncomp,
            chainsize=chainsize,
            chainratio=chainratio,
            lindist=lineardist,
            extradist=extradist,
            extraratio=extraratio,
        )
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            # accumulate the on-disk blocks a sparse read would fetch
            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write(
                b'readsize largestblock readdensity srchunks',
                b' %10d %10d %9.5f %8d',
                readsize,
                largestblock,
                readdensity,
                srchunks,
                readsize=readsize,
                largestblock=largestblock,
                readdensity=readdensity,
                srchunks=srchunks,
            )

        fm.plain(b'\n')

    fm.end()
928 930
929 931
@command(
    b'debugdirstate|debugstate',
    [
        (
            b'',
            b'nodates',
            None,
            _(b'do not display the saved mtime (DEPRECATED)'),
        ),
        (b'', b'dates', True, _(b'display the saved mtime')),
        (b'', b'datesort', None, _(b'sort by saved mtime')),
        (
            b'',
            b'docket',
            False,
            _(b'display the docket (metadata file) instead'),
        ),
        (
            b'',
            b'all',
            False,
            _(b'display dirstate-v2 tree nodes that would not exist in v1'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    if opts.get("docket"):
        # --docket: only dirstate-v2 has a docket (metadata) file
        if not repo.dirstate._use_dirstate_v2:
            raise error.Abort(_(b'dirstate v1 does not have a docket'))

        docket = repo.dirstate._map.docket
        (
            start_offset,
            root_nodes,
            nodes_with_entry,
            nodes_with_copy,
            unused_bytes,
            _unused,
            ignore_pattern,
        ) = dirstateutils.v2.TREE_METADATA.unpack(docket.tree_metadata)

        ui.write(_(b"size of dirstate data: %d\n") % docket.data_size)
        ui.write(_(b"data file uuid: %s\n") % docket.uuid)
        ui.write(_(b"start offset of root nodes: %d\n") % start_offset)
        ui.write(_(b"number of root nodes: %d\n") % root_nodes)
        ui.write(_(b"nodes with entries: %d\n") % nodes_with_entry)
        ui.write(_(b"nodes with copies: %d\n") % nodes_with_copy)
        ui.write(_(b"number of unused bytes: %d\n") % unused_bytes)
        ui.write(
            _(b"ignore pattern hash: %s\n") % binascii.hexlify(ignore_pattern)
        )
        return

    # --nodates is deprecated but still honored; --dates defaults to True
    nodates = not opts['dates']
    if opts.get('nodates') is not None:
        nodates = True
    datesort = opts.get('datesort')

    if datesort:

        def keyfunc(entry):
            filename, _state, _mode, _size, mtime = entry
            return (mtime, filename)

    else:
        keyfunc = None  # sort by filename
    entries = list(repo.dirstate._map.debug_iter(all=opts['all']))
    entries.sort(key=keyfunc)
    for entry in entries:
        filename, state, mode, size, mtime = entry
        # mtime of -1 marks an entry with no recorded timestamp
        if mtime == -1:
            timestr = b'unset '
        elif nodates:
            timestr = b'set '
        else:
            timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime))
            timestr = encoding.strtolocal(timestr)
        if mode & 0o20000:
            # symlink bit set in the recorded mode
            mode = b'lnk'
        else:
            mode = b'%3o' % (mode & 0o777 & ~util.umask)
        ui.write(b"%c %s %10d %s%s\n" % (state, mode, size, timestr, filename))
    for f in repo.dirstate.copies():
        ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
1017 1019
1018 1020
@command(
    b'debugdirstateignorepatternshash',
    [],
    _(b''),
)
def debugdirstateignorepatternshash(ui, repo, **opts):
    """show the hash of ignore patterns stored in dirstate if v2,
    or nothing for dirstate-v1
    """
    # Only the dirstate-v2 docket carries tree metadata; for v1 this command
    # intentionally prints nothing.
    if repo.dirstate._use_dirstate_v2:
        docket = repo.dirstate._map.docket
        hash_len = 20  # 160 bits for SHA-1
        # The ignore-pattern hash is the trailing field of the tree metadata
        # blob (see dirstateutils.v2.TREE_METADATA layout).
        hash_bytes = docket.tree_metadata[-hash_len:]
        ui.write(binascii.hexlify(hash_bytes) + b'\n')
1033 1035
1034 1036
@command(
    b'debugdiscovery',
    [
        (b'', b'old', None, _(b'use old-style discovery')),
        (
            b'',
            b'nonheads',
            None,
            _(b'use old-style discovery with non-heads included'),
        ),
        (b'', b'rev', [], b'restrict discovery to this set of revs'),
        (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
        (
            b'',
            b'local-as-revs',
            b"",
            b'treat local has having these revisions only',
        ),
        (
            b'',
            b'remote-as-revs',
            b"",
            b'use local as remote, with only these revisions',
        ),
    ]
    + cmdutil.remoteopts
    + cmdutil.formatteropts,
    _(b'[--rev REV] [OTHER]'),
)
def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
    """runs the changeset discovery protocol in isolation

    The local peer can be "replaced" by a subset of the local repository by
    using the `--local-as-revs` flag. In the same way, the usual `remote` peer
    can be "replaced" by a subset of the local repository using the
    `--remote-as-revs` flag. This is useful to efficiently debug pathological
    discovery situations.

    The following developer oriented config are relevant for people playing with this command:

    * devel.discovery.exchange-heads=True

      If False, the discovery will not start with
      remote head fetching and local head querying.

    * devel.discovery.grow-sample=True

      If False, the sample size used in set discovery will not be increased
      through the process

    * devel.discovery.grow-sample.dynamic=True

      When discovery.grow-sample.dynamic is True, the default, the sample size is
      adapted to the shape of the undecided set (it is set to the max of:
      <target-size>, len(roots(undecided)), len(heads(undecided)

    * devel.discovery.grow-sample.rate=1.05

      the rate at which the sample grow

    * devel.discovery.randomize=True

      If False, random samplings during discovery are deterministic. It is
      meant for integration tests.

    * devel.discovery.sample-size=200

      Control the initial size of the discovery sample

    * devel.discovery.sample-size.initial=100

      Control the initial size of the discovery for initial change
    """
    opts = pycompat.byteskwargs(opts)
    unfi = repo.unfiltered()

    # setup potential extra filtering
    local_revs = opts[b"local_as_revs"]
    remote_revs = opts[b"remote_as_revs"]

    # make sure tests are repeatable
    random.seed(int(opts[b'seed']))

    if not remote_revs:
        # talk to a real peer for the given URL
        remoteurl, branches = urlutil.get_unique_pull_path(
            b'debugdiscovery', repo, ui, remoteurl
        )
        remote = hg.peer(repo, opts, remoteurl)
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
    else:
        # --remote-as-revs: use this very repository, filtered down to the
        # requested subset, as the "remote" side of the discovery
        branches = (None, [])
        remote_filtered_revs = logcmdutil.revrange(
            unfi, [b"not (::(%s))" % remote_revs]
        )
        remote_filtered_revs = frozenset(remote_filtered_revs)

        def remote_func(x):
            return remote_filtered_revs

        repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func

        remote = repo.peer()
        remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')

    if local_revs:
        # --local-as-revs: similarly restrict the local side to a subset
        local_filtered_revs = logcmdutil.revrange(
            unfi, [b"not (::(%s))" % local_revs]
        )
        local_filtered_revs = frozenset(local_filtered_revs)

        def local_func(x):
            return local_filtered_revs

        repoview.filtertable[b'debug-discovery-local-filter'] = local_func
        repo = repo.filtered(b'debug-discovery-local-filter')

    # `data` is filled by the discovery code (via `audit=data`) with counters
    # such as total-roundtrips and total-queries, then extended below.
    data = {}
    if opts.get(b'old'):
        # legacy tree-walking discovery protocol

        def doit(pushedrevs, remoteheads, remote=remote):
            if not util.safehasattr(remote, b'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(
                repo, remote, force=True, audit=data
            )
            common = set(common)
            if not opts.get(b'nonheads'):
                ui.writenoi18n(
                    b"unpruned common: %s\n"
                    % b" ".join(sorted(short(n) for n in common))
                )

            clnode = repo.changelog.node
            # reduce the common set to its heads, as node ids
            common = repo.revs(b'heads(::%ln)', common)
            common = {clnode(r) for r in common}
            return common, hds

    else:
        # modern sampling-based set discovery

        def doit(pushedrevs, remoteheads, remote=remote):
            nodes = None
            if pushedrevs:
                revs = logcmdutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(
                ui, repo, remote, ancestorsof=nodes, audit=data
            )
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts[b'rev']

    fm = ui.formatter(b'debugdiscovery', opts)
    if fm.strict_format:
        # machine-readable output requested: capture stray ui output into the
        # structured data instead of mixing it into the formatted stream
        @contextlib.contextmanager
        def may_capture_output():
            ui.pushbuffer()
            yield
            data[b'output'] = ui.popbuffer()

    else:
        may_capture_output = util.nullcontextmanager
    with may_capture_output():
        with util.timedcm('debug-discovery') as t:
            common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    heads_common = set(common)
    heads_remote = set(hds)
    heads_local = set(repo.heads())
    # note: they cannot be a local or remote head that is in common and not
    # itself a head of common.
    heads_common_local = heads_common & heads_local
    heads_common_remote = heads_common & heads_remote
    heads_common_both = heads_common & heads_remote & heads_local

    all = repo.revs(b'all()')
    common = repo.revs(b'::%ln', common)
    roots_common = repo.revs(b'roots(::%ld)', common)
    missing = repo.revs(b'not ::%ld', common)
    heads_missing = repo.revs(b'heads(%ld)', missing)
    roots_missing = repo.revs(b'roots(%ld)', missing)
    # every revision is either common or missing, by construction
    assert len(common) + len(missing) == len(all)

    # the set the discovery started from: neither below a common-remote head
    # nor above a common-local head
    initial_undecided = repo.revs(
        b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
    )
    heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
    roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
    common_initial_undecided = initial_undecided & common
    missing_initial_undecided = initial_undecided & missing

    data[b'elapsed'] = t.elapsed
    data[b'nb-common-heads'] = len(heads_common)
    data[b'nb-common-heads-local'] = len(heads_common_local)
    data[b'nb-common-heads-remote'] = len(heads_common_remote)
    data[b'nb-common-heads-both'] = len(heads_common_both)
    data[b'nb-common-roots'] = len(roots_common)
    data[b'nb-head-local'] = len(heads_local)
    data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
    data[b'nb-head-remote'] = len(heads_remote)
    data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
        heads_common_remote
    )
    data[b'nb-revs'] = len(all)
    data[b'nb-revs-common'] = len(common)
    data[b'nb-revs-missing'] = len(missing)
    data[b'nb-missing-heads'] = len(heads_missing)
    data[b'nb-missing-roots'] = len(roots_missing)
    data[b'nb-ini_und'] = len(initial_undecided)
    data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
    data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
    data[b'nb-ini_und-common'] = len(common_initial_undecided)
    data[b'nb-ini_und-missing'] = len(missing_initial_undecided)

    fm.startitem()
    fm.data(**pycompat.strkwargs(data))
    # display discovery summary
    fm.plain(b"elapsed time: %(elapsed)f seconds\n" % data)
    fm.plain(b"round-trips: %(total-roundtrips)9d\n" % data)
    fm.plain(b"queries: %(total-queries)9d\n" % data)
    fm.plain(b"heads summary:\n")
    fm.plain(b" total common heads: %(nb-common-heads)9d\n" % data)
    fm.plain(b" also local heads: %(nb-common-heads-local)9d\n" % data)
    fm.plain(b" also remote heads: %(nb-common-heads-remote)9d\n" % data)
    fm.plain(b" both: %(nb-common-heads-both)9d\n" % data)
    fm.plain(b" local heads: %(nb-head-local)9d\n" % data)
    fm.plain(b" common: %(nb-common-heads-local)9d\n" % data)
    fm.plain(b" missing: %(nb-head-local-missing)9d\n" % data)
    fm.plain(b" remote heads: %(nb-head-remote)9d\n" % data)
    fm.plain(b" common: %(nb-common-heads-remote)9d\n" % data)
    fm.plain(b" unknown: %(nb-head-remote-unknown)9d\n" % data)
    fm.plain(b"local changesets: %(nb-revs)9d\n" % data)
    fm.plain(b" common: %(nb-revs-common)9d\n" % data)
    fm.plain(b" heads: %(nb-common-heads)9d\n" % data)
    fm.plain(b" roots: %(nb-common-roots)9d\n" % data)
    fm.plain(b" missing: %(nb-revs-missing)9d\n" % data)
    fm.plain(b" heads: %(nb-missing-heads)9d\n" % data)
    fm.plain(b" roots: %(nb-missing-roots)9d\n" % data)
    fm.plain(b" first undecided set: %(nb-ini_und)9d\n" % data)
    fm.plain(b" heads: %(nb-ini_und-heads)9d\n" % data)
    fm.plain(b" roots: %(nb-ini_und-roots)9d\n" % data)
    fm.plain(b" common: %(nb-ini_und-common)9d\n" % data)
    fm.plain(b" missing: %(nb-ini_und-missing)9d\n" % data)

    if ui.verbose:
        fm.plain(
            b"common heads: %s\n"
            % b" ".join(sorted(short(n) for n in heads_common))
        )
    fm.end()
1289 1291
1290 1292
# Buffer size (4 KiB) used by debugdownload for streaming reads/writes.
_chunksize = 4 << 10
1292 1294
1293 1295
@command(
    b'debugdownload',
    [
        (b'o', b'output', b'', _(b'path')),
    ],
    optionalrepo=True,
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config

    The resource is streamed in _chunksize pieces to the --output file when
    given, otherwise to the ui (stdout).
    """
    fh = urlmod.open(ui, url, output)
    try:
        dest = ui
        if output:
            dest = open(output, b"wb", _chunksize)
        try:
            # stream in fixed-size chunks to bound memory usage
            data = fh.read(_chunksize)
            while data:
                dest.write(data)
                data = fh.read(_chunksize)
        finally:
            if output:
                dest.close()
    finally:
        # Fix: the URL handle was previously never closed (resource leak).
        # NOTE(review): assumes the file-like object returned by urlmod.open
        # supports close() — confirm against urlmod implementation.
        fh.close()
1316 1318
1317 1319
@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = None

        if util.safehasattr(extmod, '__file__'):
            extsource = pycompat.fsencode(extmod.__file__)
        elif getattr(sys, 'oxidized', False):
            # frozen (oxidized) builds have no per-module file on disk
            extsource = pycompat.sysexecutable
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', b'').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', extname)
        else:
            fm.write(b'name', b'%s', extname)
            # annotate the name with the compatibility status of the extension
            if isinternal or hgver in exttestedwith:
                fm.plain(b'\n')
            elif not exttestedwith:
                fm.plain(_(b' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(b' (%s!)\n' % lasttestedversion)

        fm.condwrite(
            ui.verbose and extsource,
            b'source',
            _(b' location: %s\n'),
            extsource or b"",
        )

        if ui.verbose:
            fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(
            ui.verbose and exttestedwith,
            b'testedwith',
            _(b' tested with: %s\n'),
            fm.formatlist(exttestedwith, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and extbuglink,
            b'buglink',
            _(b' bug reporting: %s\n'),
            extbuglink or b"",
        )

    fm.end()
1379 1381
1380 1382
@command(
    b'debugfileset',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'apply the filespec on this revision'),
            _(b'REV'),
        ),
        (
            b'',
            b'all-files',
            False,
            _(b'test files from all revisions and working directory'),
        ),
        (
            b's',
            b'show-matcher',
            None,
            _(b'print internal representation of matcher'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
    ],
    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset

    fileset.symbols  # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    # context the fileset is evaluated against (None -> working directory)
    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'), None)

    # compilation pipeline: each stage transforms the tree of the previous one
    stages = [
        (b'parsed', pycompat.identity),
        (b'analyzed', filesetlang.analyze),
        (b'optimized', filesetlang.optimize),
    ]
    stagenames = {n for n, f in stages}

    showalways = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            # only emit the stage label when stages were explicitly requested
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(filesetlang.prettyformat(tree), b"\n")

    # collect the candidate file names the matcher will be tested against
    files = set()
    if opts[b'all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts[b'all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(
            repo.dirstate.walk(
                scmutil.matchall(repo),
                subrepos=list(wctx.substate),
                unknown=True,
                ignored=True,
            )
        )
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(repo.getcwd(), expr)
    if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    # print the files selected by the compiled matcher
    for f in sorted(files):
        if not m(f):
            continue
        ui.write(b"%s\n" % f)
1476 1478
1477 1479
@command(
    b"debug-repair-issue6528",
    [
        (
            b'',
            b'to-report',
            b'',
            _(b'build a report of affected revisions to this file'),
            _(b'FILE'),
        ),
        (
            b'',
            b'from-report',
            b'',
            _(b'repair revisions listed in this report file'),
            _(b'FILE'),
        ),
        (
            b'',
            b'paranoid',
            False,
            _(b'check that both detection methods do the same thing'),
        ),
    ]
    + cmdutil.dryrunopts,
)
def debug_repair_issue6528(ui, repo, **opts):
    """find affected revisions and repair them. See issue6528 for more details.

    The `--to-report` and `--from-report` flags allow you to cache and reuse the
    computation of affected revisions for a given repository across clones.
    The report format is line-based (with empty lines ignored):

    ```
    <ascii-hex of the affected revision>,... <unencoded filelog index filename>
    ```

    There can be multiple broken revisions per filelog, they are separated by
    a comma with no spaces. The only space is between the revision(s) and the
    filename.

    Note that this does *not* mean that this repairs future affected revisions,
    that needs a separate fix at the exchange level that was introduced in
    Mercurial 5.9.1.

    There is a `--paranoid` flag to test that the fast implementation is correct
    by checking it against the slow implementation. Since this matter is quite
    urgent and testing every edge-case is probably quite costly, we use this
    method to test on large repositories as a fuzzing method of sorts.
    """
    # Building a report and applying/dry-running one are mutually exclusive.
    cmdutil.check_incompatible_arguments(
        opts, 'to_report', ['from_report', 'dry_run']
    )
    # TODO maybe add filelog pattern and revision pattern parameters to help
    # narrow down the search for users that know what they're looking for?

    # The corruption repaired here only exists in revlogv1 repositories.
    if requirements.REVLOGV1_REQUIREMENT not in repo.requirements:
        msg = b"can only repair revlogv1 repositories, v2 is not affected"
        raise error.Abort(_(msg))

    # Delegate the actual scan/repair work to the rewrite module.
    rewrite.repair_issue6528(
        ui,
        repo,
        dry_run=opts.get('dry_run'),
        to_report=opts.get('to_report'),
        from_report=opts.get('from_report'),
        paranoid=opts.get('paranoid'),
    )
1550 1552
1551 1553
@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    # first column width: widest variant name, at least the header label
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len(b'format-variant'), maxvariantlength)

    def makeformatname(name):
        # pad each variant name so the value columns line up
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))

    fm = ui.formatter(b'debugformat', opts)
    if fm.isplain():

        def formatvalue(value):
            # plain output: strings verbatim, booleans rendered as yes/no
            if util.safehasattr(value, b'startswith'):
                return value
            if value:
                return b'yes'
            else:
                return b'no'

    else:
        formatvalue = pycompat.identity

    # header row
    fm.plain(b'format-variant')
    fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
    fm.plain(b' repo')
    if ui.verbose:
        fm.plain(b' config default')
    fm.plain(b'\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # label mismatches between repo value, configured value and default
        if repovalue != configvalue:
            namelabel = b'formatvariant.name.mismatchconfig'
            repolabel = b'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = b'formatvariant.name.mismatchdefault'
            repolabel = b'formatvariant.repo.mismatchdefault'
        else:
            namelabel = b'formatvariant.name.uptodate'
            repolabel = b'formatvariant.repo.uptodate'

        fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
        fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
        if fv.default != configvalue:
            configlabel = b'formatvariant.config.special'
        else:
            configlabel = b'formatvariant.config.default'
        # config and default columns only appear with --verbose
        fm.condwrite(
            ui.verbose,
            b'config',
            b' %6s',
            formatvalue(configvalue),
            label=configlabel,
        )
        fm.condwrite(
            ui.verbose,
            b'default',
            b' %7s',
            formatvalue(fv.default),
            label=b'formatvariant.default',
        )
        fm.plain(b'\n')
    fm.end()
1622 1624
1623 1625
@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""

    def yesno(flag):
        # render a boolean probe result exactly like the original and/or idiom
        return b'yes' if flag else b'no'

    ui.writenoi18n(b'path: %s\n' % path)
    mountpoint = util.getfsmountpoint(path) or b'(unknown)'
    ui.writenoi18n(b'mounted on: %s\n' % mountpoint)
    ui.writenoi18n(b'exec: %s\n' % yesno(util.checkexec(path)))
    ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
    ui.writenoi18n(b'symlink: %s\n' % yesno(util.checklink(path)))
    ui.writenoi18n(b'hardlink: %s\n' % yesno(util.checknlink(path)))
    casesensitive = b'(unknown)'
    try:
        # the probe needs a scratch file inside the target directory
        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
            casesensitive = yesno(util.fscasesensitive(f.name))
    except OSError:
        pass
    ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1646 1648
1647 1649
@command(
    b'debuggetbundle',
    [
        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
        (
            b't',
            b'type',
            b'bzip2',
            _(b'bundle compression type to use'),
            _(b'TYPE'),
        ),
    ],
    _(b'REPO FILE [-H|-C ID]...'),
    norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable(b'getbundle'):
        raise error.Abort(b"getbundle() not supported by target repository")
    # TODO: get desired bundlecaps from command line.
    getbundle_args = {'bundlecaps': None}
    if common:
        getbundle_args['common'] = [bin(s) for s in common]
    if head:
        getbundle_args['heads'] = [bin(s) for s in head]
    bundle = peer.getbundle(b'debug', **getbundle_args)

    # map the user-facing compression name onto the on-disk bundle type
    requested = opts.get(b'type', b'bzip2').lower()
    bundletype = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }.get(requested)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_(b'unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1694 1696
1695 1697
@command(b'debugignore', [], b'[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write(b"%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != b'.':
                # check the file itself first, then each of its parent
                # directories (a file is ignored if any containing
                # directory is ignored)
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    for p in pathutil.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_(b"%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(
                        _(
                            b"%s is ignored because of "
                            b"containing directory %s\n"
                        )
                        % (uipathfn(f), ignored)
                    )
                # report which ignore-file rule produced the match
                ignorefile, lineno, line = ignoredata
                ui.write(
                    _(b"(ignore rule in %s, line %d: '%s')\n")
                    % (ignorefile, lineno, line)
                )
            else:
                ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1744 1746
1745 1747
@command(
    b'debugindex',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindex', file_, opts)

    # full hashes with --debug, short ones otherwise
    shortfn = hex if ui.debugflag else short

    # node column width: measure one formatted node, default to 12 when the
    # store is empty
    idlen = 12
    first_rev = next(iter(store), None)
    if first_rev is not None:
        idlen = len(shortfn(store.node(first_rev)))

    fm = ui.formatter(b'debugindex', opts)
    fm.plain(
        b' rev linkrev %s %s p2\n'
        % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
    )

    for rev in store:
        node = store.node(rev)
        p1, p2 = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
        fm.write(b'node', b'%s ', shortfn(node))
        fm.write(b'p1', b'%s ', shortfn(p1))
        fm.write(b'p2', b'%s', shortfn(p2))
        fm.plain(b'\n')

    fm.end()
1785 1787
1786 1788
@command(
    b'debugindexdot',
    cmdutil.debugrevlogopts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
    ui.writenoi18n(b"digraph G {\n")
    for rev in store:
        p1, p2 = store.parents(store.node(rev))
        # one edge per parent; a null second parent is omitted
        ui.write(b"\t%d -> %d\n" % (store.rev(p1), rev))
        if p2 != repo.nullid:
            ui.write(b"\t%d -> %d\n" % (store.rev(p2), rev))
    ui.write(b"}\n")
1805 1807
1806 1808
@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # NOTE(review): presumably this call warms the index before collecting
    # stats — confirm against the changelog implementation.
    repo.changelog.shortest(repo.nullid, 1)
    clindex = repo.changelog.index
    # only the native (C/Rust) index implementations expose stats()
    if not util.safehasattr(clindex, b'stats'):
        raise error.Abort(_(b'debugindexstats only works with native code'))
    stats = clindex.stats()
    for name in sorted(stats):
        ui.write(b'%s: %d\n' % (name, stats[name]))
1816 1818
1817 1819
1818 1820 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1819 1821 def debuginstall(ui, **opts):
1820 1822 """test Mercurial installation
1821 1823
1822 1824 Returns 0 on success.
1823 1825 """
1824 1826 opts = pycompat.byteskwargs(opts)
1825 1827
1826 1828 problems = 0
1827 1829
1828 1830 fm = ui.formatter(b'debuginstall', opts)
1829 1831 fm.startitem()
1830 1832
1831 1833 # encoding might be unknown or wrong. don't translate these messages.
1832 1834 fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
1833 1835 err = None
1834 1836 try:
1835 1837 codecs.lookup(pycompat.sysstr(encoding.encoding))
1836 1838 except LookupError as inst:
1837 1839 err = stringutil.forcebytestr(inst)
1838 1840 problems += 1
1839 1841 fm.condwrite(
1840 1842 err,
1841 1843 b'encodingerror',
1842 1844 b" %s\n (check that your locale is properly set)\n",
1843 1845 err,
1844 1846 )
1845 1847
1846 1848 # Python
1847 1849 pythonlib = None
1848 1850 if util.safehasattr(os, '__file__'):
1849 1851 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1850 1852 elif getattr(sys, 'oxidized', False):
1851 1853 pythonlib = pycompat.sysexecutable
1852 1854
1853 1855 fm.write(
1854 1856 b'pythonexe',
1855 1857 _(b"checking Python executable (%s)\n"),
1856 1858 pycompat.sysexecutable or _(b"unknown"),
1857 1859 )
1858 1860 fm.write(
1859 1861 b'pythonimplementation',
1860 1862 _(b"checking Python implementation (%s)\n"),
1861 1863 pycompat.sysbytes(platform.python_implementation()),
1862 1864 )
1863 1865 fm.write(
1864 1866 b'pythonver',
1865 1867 _(b"checking Python version (%s)\n"),
1866 1868 (b"%d.%d.%d" % sys.version_info[:3]),
1867 1869 )
1868 1870 fm.write(
1869 1871 b'pythonlib',
1870 1872 _(b"checking Python lib (%s)...\n"),
1871 1873 pythonlib or _(b"unknown"),
1872 1874 )
1873 1875
1874 1876 try:
1875 1877 from . import rustext # pytype: disable=import-error
1876 1878
1877 1879 rustext.__doc__ # trigger lazy import
1878 1880 except ImportError:
1879 1881 rustext = None
1880 1882
1881 1883 security = set(sslutil.supportedprotocols)
1882 1884 if sslutil.hassni:
1883 1885 security.add(b'sni')
1884 1886
1885 1887 fm.write(
1886 1888 b'pythonsecurity',
1887 1889 _(b"checking Python security support (%s)\n"),
1888 1890 fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
1889 1891 )
1890 1892
1891 1893 # These are warnings, not errors. So don't increment problem count. This
1892 1894 # may change in the future.
1893 1895 if b'tls1.2' not in security:
1894 1896 fm.plain(
1895 1897 _(
1896 1898 b' TLS 1.2 not supported by Python install; '
1897 1899 b'network connections lack modern security\n'
1898 1900 )
1899 1901 )
1900 1902 if b'sni' not in security:
1901 1903 fm.plain(
1902 1904 _(
1903 1905 b' SNI not supported by Python install; may have '
1904 1906 b'connectivity issues with some servers\n'
1905 1907 )
1906 1908 )
1907 1909
1908 1910 fm.plain(
1909 1911 _(
1910 1912 b"checking Rust extensions (%s)\n"
1911 1913 % (b'missing' if rustext is None else b'installed')
1912 1914 ),
1913 1915 )
1914 1916
1915 1917 # TODO print CA cert info
1916 1918
1917 1919 # hg version
1918 1920 hgver = util.version()
1919 1921 fm.write(
1920 1922 b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
1921 1923 )
1922 1924 fm.write(
1923 1925 b'hgverextra',
1924 1926 _(b"checking Mercurial custom build (%s)\n"),
1925 1927 b'+'.join(hgver.split(b'+')[1:]),
1926 1928 )
1927 1929
1928 1930 # compiled modules
1929 1931 hgmodules = None
1930 1932 if util.safehasattr(sys.modules[__name__], '__file__'):
1931 1933 hgmodules = os.path.dirname(pycompat.fsencode(__file__))
1932 1934 elif getattr(sys, 'oxidized', False):
1933 1935 hgmodules = pycompat.sysexecutable
1934 1936
1935 1937 fm.write(
1936 1938 b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
1937 1939 )
1938 1940 fm.write(
1939 1941 b'hgmodules',
1940 1942 _(b"checking installed modules (%s)...\n"),
1941 1943 hgmodules or _(b"unknown"),
1942 1944 )
1943 1945
1944 1946 rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
1945 1947 rustext = rustandc # for now, that's the only case
1946 1948 cext = policy.policy in (b'c', b'allow') or rustandc
1947 1949 nopure = cext or rustext
1948 1950 if nopure:
1949 1951 err = None
1950 1952 try:
1951 1953 if cext:
1952 1954 from .cext import ( # pytype: disable=import-error
1953 1955 base85,
1954 1956 bdiff,
1955 1957 mpatch,
1956 1958 osutil,
1957 1959 )
1958 1960
1959 1961 # quiet pyflakes
1960 1962 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1961 1963 if rustext:
1962 1964 from .rustext import ( # pytype: disable=import-error
1963 1965 ancestor,
1964 1966 dirstate,
1965 1967 )
1966 1968
1967 1969 dir(ancestor), dir(dirstate) # quiet pyflakes
1968 1970 except Exception as inst:
1969 1971 err = stringutil.forcebytestr(inst)
1970 1972 problems += 1
1971 1973 fm.condwrite(err, b'extensionserror', b" %s\n", err)
1972 1974
1973 1975 compengines = util.compengines._engines.values()
1974 1976 fm.write(
1975 1977 b'compengines',
1976 1978 _(b'checking registered compression engines (%s)\n'),
1977 1979 fm.formatlist(
1978 1980 sorted(e.name() for e in compengines),
1979 1981 name=b'compengine',
1980 1982 fmt=b'%s',
1981 1983 sep=b', ',
1982 1984 ),
1983 1985 )
1984 1986 fm.write(
1985 1987 b'compenginesavail',
1986 1988 _(b'checking available compression engines (%s)\n'),
1987 1989 fm.formatlist(
1988 1990 sorted(e.name() for e in compengines if e.available()),
1989 1991 name=b'compengine',
1990 1992 fmt=b'%s',
1991 1993 sep=b', ',
1992 1994 ),
1993 1995 )
1994 1996 wirecompengines = compression.compengines.supportedwireengines(
1995 1997 compression.SERVERROLE
1996 1998 )
1997 1999 fm.write(
1998 2000 b'compenginesserver',
1999 2001 _(
2000 2002 b'checking available compression engines '
2001 2003 b'for wire protocol (%s)\n'
2002 2004 ),
2003 2005 fm.formatlist(
2004 2006 [e.name() for e in wirecompengines if e.wireprotosupport()],
2005 2007 name=b'compengine',
2006 2008 fmt=b'%s',
2007 2009 sep=b', ',
2008 2010 ),
2009 2011 )
2010 2012 re2 = b'missing'
2011 2013 if util._re2:
2012 2014 re2 = b'available'
2013 2015 fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
2014 2016 fm.data(re2=bool(util._re2))
2015 2017
2016 2018 # templates
2017 2019 p = templater.templatedir()
2018 2020 fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
2019 2021 fm.condwrite(not p, b'', _(b" no template directories found\n"))
2020 2022 if p:
2021 2023 (m, fp) = templater.try_open_template(b"map-cmdline.default")
2022 2024 if m:
2023 2025 # template found, check if it is working
2024 2026 err = None
2025 2027 try:
2026 2028 templater.templater.frommapfile(m)
2027 2029 except Exception as inst:
2028 2030 err = stringutil.forcebytestr(inst)
2029 2031 p = None
2030 2032 fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
2031 2033 else:
2032 2034 p = None
2033 2035 fm.condwrite(
2034 2036 p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
2035 2037 )
2036 2038 fm.condwrite(
2037 2039 not m,
2038 2040 b'defaulttemplatenotfound',
2039 2041 _(b" template '%s' not found\n"),
2040 2042 b"default",
2041 2043 )
2042 2044 if not p:
2043 2045 problems += 1
2044 2046 fm.condwrite(
2045 2047 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
2046 2048 )
2047 2049
2048 2050 # editor
2049 2051 editor = ui.geteditor()
2050 2052 editor = util.expandpath(editor)
2051 2053 editorbin = procutil.shellsplit(editor)[0]
2052 2054 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
2053 2055 cmdpath = procutil.findexe(editorbin)
2054 2056 fm.condwrite(
2055 2057 not cmdpath and editor == b'vi',
2056 2058 b'vinotfound',
2057 2059 _(
2058 2060 b" No commit editor set and can't find %s in PATH\n"
2059 2061 b" (specify a commit editor in your configuration"
2060 2062 b" file)\n"
2061 2063 ),
2062 2064 not cmdpath and editor == b'vi' and editorbin,
2063 2065 )
2064 2066 fm.condwrite(
2065 2067 not cmdpath and editor != b'vi',
2066 2068 b'editornotfound',
2067 2069 _(
2068 2070 b" Can't find editor '%s' in PATH\n"
2069 2071 b" (specify a commit editor in your configuration"
2070 2072 b" file)\n"
2071 2073 ),
2072 2074 not cmdpath and editorbin,
2073 2075 )
2074 2076 if not cmdpath and editor != b'vi':
2075 2077 problems += 1
2076 2078
2077 2079 # check username
2078 2080 username = None
2079 2081 err = None
2080 2082 try:
2081 2083 username = ui.username()
2082 2084 except error.Abort as e:
2083 2085 err = e.message
2084 2086 problems += 1
2085 2087
2086 2088 fm.condwrite(
2087 2089 username, b'username', _(b"checking username (%s)\n"), username
2088 2090 )
2089 2091 fm.condwrite(
2090 2092 err,
2091 2093 b'usernameerror',
2092 2094 _(
2093 2095 b"checking username...\n %s\n"
2094 2096 b" (specify a username in your configuration file)\n"
2095 2097 ),
2096 2098 err,
2097 2099 )
2098 2100
2099 2101 for name, mod in extensions.extensions():
2100 2102 handler = getattr(mod, 'debuginstall', None)
2101 2103 if handler is not None:
2102 2104 problems += handler(ui, fm)
2103 2105
2104 2106 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
2105 2107 if not problems:
2106 2108 fm.data(problems=problems)
2107 2109 fm.condwrite(
2108 2110 problems,
2109 2111 b'problems',
2110 2112 _(b"%d problems detected, please check your install!\n"),
2111 2113 problems,
2112 2114 )
2113 2115 fm.end()
2114 2116
2115 2117 return problems
2116 2118
2117 2119
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    # This is a peer handle, not a local repo; it may hold an open
    # connection (e.g. ssh), so always release it when done (the original
    # code leaked it -- compare debugpeer below, which closes in finally).
    peer = hg.peer(ui, opts, repopath)
    try:
        if not peer.capable(b'known'):
            raise error.Abort(b"known() not supported by target repository")
        flags = peer.known([bin(s) for s in ids])
        # One '1'/'0' character per queried node, in argument order.
        ui.write(b"%s\n" % (b"".join([b"1" if f else b"0" for f in flags])))
    finally:
        peer.close()
2131 2133
2132 2134
@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # Pure alias: all the completion logic lives in debugnamecomplete().
    return debugnamecomplete(ui, repo, *args)
2137 2139
2138 2140
@command(
    b'debuglocks',
    [
        (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-free-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # Forced-free mode: simply delete the lock file(s) without checking
    # who (if anyone) holds them, then stop -- hence "DANGEROUS" above.
    if opts.get('force_free_lock'):
        repo.svfs.tryunlink(b'lock')
    if opts.get('force_free_wlock'):
        repo.vfs.tryunlink(b'wlock')
    if opts.get('force_free_lock') or opts.get('force_free_wlock'):
        return 0

    # Lock-setting mode: acquire the requested lock(s) non-blockingly
    # (wait=False), then hold them until the user/interruption releases us.
    locks = []
    try:
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            try:
                if ui.interactive():
                    prompt = _(b"ready to release the lock (y)? $$ &Yes")
                    ui.promptchoice(prompt)
                else:
                    msg = b"%d locks held, waiting for signal\n"
                    msg %= len(locks)
                    ui.status(msg)
                    while True:  # XXX wait for a signal
                        time.sleep(0.1)
            except KeyboardInterrupt:
                msg = b"signal-received releasing locks\n"
                ui.status(msg)
            return 0
    finally:
        # Always release whatever we managed to acquire, even on Abort.
        release(*locks)

    # Reporting mode (default): inspect both lock files and describe holders.
    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # We acquired it, so nobody else held it; drop it immediately.
            l.release()
        else:
            # Somebody holds the lock: read its metadata for the report.
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if b":" in locker:
                    host, pid = locker.split(b':')
                    # Only mention the host when the lock is held remotely.
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except OSError as e:
                # ENOENT means the lock vanished between probe and stat;
                # fall through and report it as free.
                if e.errno != errno.ENOENT:
                    raise

        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    # Exit status is the number of held locks (0 == everything free).
    return held
2262 2264
2263 2265
@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        # Not every revlog implementation exposes a fulltext cache; abort
        # with a clear message rather than an AttributeError traceback.
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _(
                b"Current revlog implementation doesn't appear to have a "
                b"manifest fulltext cache\n"
            )
            raise error.Abort(msg)

    if opts.get('clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
            return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(
                        bytes(e), hint=b"Check your manifest node id"
                    )
                manifest.read()  # stores revision in cache too
            return

    # Neither --clear nor --add: just display the cache contents.
    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )
2337 2339
2338 2340
@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
def debugmergestate(ui, repo, *args, **opts):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    if ui.verbose:
        ms = mergestatemod.mergestate(repo)

        # sort so that reasonable information is on top
        v1records = ms._readrecordsv1()
        v2records = ms._readrecordsv2()

        if not v1records and not v2records:
            pass
        elif not v2records:
            ui.writenoi18n(b'no version 2 merge state\n')
        elif ms._v1v2match(v1records, v2records):
            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
        else:
            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')

    opts = pycompat.byteskwargs(opts)
    if not opts[b'template']:
        # Default template: commit heads, per-file state (with either the
        # 3-way-merge fields or the rename fields), then leftover extras.
        opts[b'template'] = (
            b'{if(commits, "", "no merge state found\n")}'
            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
            b'{files % "file: {path} (state \\"{state}\\")\n'
            b'{if(local_path, "'
            b'  local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
            b'  ancestor path: {ancestor_path} (node {ancestor_node})\n'
            b'  other path: {other_path} (node {other_node})\n'
            b'")}'
            b'{if(rename_side, "'
            b'  rename side: {rename_side}\n'
            b'  renamed path: {renamed_path}\n'
            b'")}'
            b'{extras % "  extra: {key} = {value}\n"}'
            b'"}'
            b'{extras % "extra: {file} ({key} = {value})\n"}'
        )

    ms = mergestatemod.mergestate.read(repo)

    fm = ui.formatter(b'debugmergestate', opts)
    fm.startitem()

    # The two commits being merged, with their optional user-visible labels.
    fm_commits = fm.nested(b'commits')
    if ms.active():
        for name, node, label_index in (
            (b'local', ms.local, 0),
            (b'other', ms.other, 1),
        ):
            fm_commits.startitem()
            fm_commits.data(name=name)
            fm_commits.data(node=hex(node))
            if ms._labels and len(ms._labels) > label_index:
                fm_commits.data(label=ms._labels[label_index])
    fm_commits.end()

    fm_files = fm.nested(b'files')
    if ms.active():
        for f in ms:
            fm_files.startitem()
            fm_files.data(path=f)
            # state is the raw record tuple; state[0] is the record type and
            # selects how the remaining positional fields are interpreted.
            state = ms._state[f]
            fm_files.data(state=state[0])
            if state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED,
                mergestatemod.MERGE_RECORD_RESOLVED,
            ):
                fm_files.data(local_key=state[1])
                fm_files.data(local_path=state[2])
                fm_files.data(ancestor_path=state[3])
                fm_files.data(ancestor_node=state[4])
                fm_files.data(other_path=state[5])
                fm_files.data(other_node=state[6])
                fm_files.data(local_flags=state[7])
            elif state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
                mergestatemod.MERGE_RECORD_RESOLVED_PATH,
            ):
                fm_files.data(renamed_path=state[1])
                fm_files.data(rename_side=state[2])
            fm_extras = fm_files.nested(b'extras')
            for k, v in sorted(ms.extras(f).items()):
                fm_extras.startitem()
                fm_extras.data(key=k)
                fm_extras.data(value=v)
            fm_extras.end()

    fm_files.end()

    fm_extras = fm.nested(b'extras')
    for f, d in sorted(ms.allextras().items()):
        if f in ms:
            # If file is in mergestate, we have already processed its extras
            continue
        for k, v in d.items():
            fm_extras.startitem()
            fm_extras.data(file=f)
            fm_extras.data(key=k)
            fm_extras.data(value=v)
    fm_extras.end()

    fm.end()
2446 2448
2447 2449
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    # Gather candidates from every namespace except 'branches'; branch
    # names are collected separately so only open branches are offered.
    candidates = set()
    for nsname, ns in repo.names.items():
        if nsname == b'branches':
            continue
        candidates.update(ns.listnames(repo))
    for tag, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            candidates.add(tag)

    # No arguments means "match everything" (empty prefix).
    prefixes = args or [b'']
    matches = {
        name
        for prefix in prefixes
        for name in candidates
        if name.startswith(prefix)
    }
    ui.write(b'\n'.join(sorted(matches)))
    ui.write(b'\n')
2470 2472
2471 2473
@command(
    b'debugnodemap',
    [
        (
            b'',
            b'dump-new',
            False,
            _(b'write a (new) persistent binary nodemap on stdout'),
        ),
        (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
        (
            b'',
            b'check',
            False,
            _(b'check that the data on disk data are correct.'),
        ),
        (
            b'',
            b'metadata',
            False,
            _(b'display the on disk meta data for the nodemap'),
        ),
    ],
)
def debugnodemap(ui, repo, **opts):
    """write and inspect on disk nodemap"""
    # Each mode works on the unfiltered changelog; it is only loaded when a
    # mode is actually selected, and the first matching flag wins.
    if opts['dump_new']:
        cl = repo.unfiltered().changelog
        if util.safehasattr(cl.index, "nodemap_data_all"):
            payload = cl.index.nodemap_data_all()
        else:
            payload = nodemap.persistent_data(cl.index)
        ui.write(payload)
    elif opts['dump_disk']:
        cl = repo.unfiltered().changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, payload = nm_data
            ui.write(payload[:])
    elif opts['check']:
        cl = repo.unfiltered().changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, payload = nm_data
            return nodemap.check_data(ui, cl.index, payload)
    elif opts['metadata']:
        cl = repo.unfiltered().changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, payload = nm_data
            ui.write((b"uid: %s\n") % docket.uid)
            ui.write((b"tip-rev: %d\n") % docket.tip_rev)
            ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
            ui.write((b"data-length: %d\n") % docket.data_length)
            ui.write((b"data-unused: %d\n") % docket.data_unused)
            unused_perc = docket.data_unused * 100.0 / docket.data_length
            ui.write((b"data-unused: %2.3f%%\n") % unused_perc)
2533 2535
2534 2536
@command(
    b'debugobsolete',
    [
        (b'', b'flags', 0, _(b'markers flag')),
        (
            b'',
            b'record-parents',
            False,
            _(b'record parent information for the precursor'),
        ),
        (b'r', b'rev', [], _(b'display markers relevant to REV')),
        (
            b'',
            b'exclusive',
            False,
            _(b'restrict display to markers only relevant to REV'),
        ),
        (b'', b'index', False, _(b'display index of the marker')),
        (b'', b'delete', [], _(b'delete markers specified by indices')),
    ]
    + cmdutil.commitopts2
    + cmdutil.formatteropts,
    _(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        # Returns the binary node for a full hex id, or raises InputError.
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != repo.nodeconstants.nodelen:
                raise TypeError()
            return n
        except TypeError:
            raise error.InputError(
                b'changeset references must be full hexadecimal '
                b'node identifiers'
            )

    # Mode 1: --delete removes markers by their index in the obsstore.
    if opts.get(b'delete'):
        indices = []
        for v in opts.get(b'delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.InputError(
                    _(b'invalid index value: %r') % v,
                    hint=_(b'use integers for indices'),
                )

        if repo.currenttransaction():
            raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of transaction.')
            )

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_(b'deleted %i obsolescence markers\n') % n)

        return

    # Mode 2: a precursor argument creates a new marker.
    if precursor is not None:
        if opts[b'rev']:
            raise error.InputError(
                b'cannot select revision when creating marker'
            )
        metadata = {}
        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        # Lock then open a transaction; the nested finally blocks guarantee
        # tr.release() runs before l.release() in every outcome.
        l = repo.lock()
        try:
            tr = repo.transaction(b'debugobsolete')
            try:
                date = opts.get(b'date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts[b'record_parents']:
                    # --record-parents needs the precursor to exist locally.
                    if prec not in repo.unfiltered():
                        raise error.Abort(
                            b'cannot used --record-parents on '
                            b'unknown changesets'
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts[b'flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % stringutil.forcebytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # Mode 3 (default): list markers, optionally limited to --rev.
        if opts[b'rev']:
            revs = logcmdutil.revrange(repo, opts[b'rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts[b'exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get(b'rev') and opts.get(b'index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get(b'index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
2684 2686
2685 2687
@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    # Resolve the requested revision (working directory when --rev omitted)
    # and print one "source -> destination" line per recorded p1 copy.
    rev = pycompat.byteskwargs(opts).get(b'rev')
    ctx = scmutil.revsingle(repo, rev, default=None)
    copy_map = ctx.p1copies()
    for dest, source in copy_map.items():
        ui.write(b'%s -> %s\n' % (source, dest))
2698 2700
2699 2701
@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""

    # NOTE: this function was previously (mis)named `debugp1copies`, which
    # silently shadowed the real debugp1copies defined just above at module
    # level. The command table was unaffected (the @command decorator
    # registers the function object at decoration time under
    # b'debugp2copies'), so renaming the def only restores the correct
    # module-level binding for debugp1copies.
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    # One "source -> destination" line per copy recorded against p2.
    for dst, src in ctx.p2copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))
2712 2714
2713 2715
@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    """complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used."""

    def complete(path, acceptable):
        # Returns (files, dirs): completions for 'path' limited to dirstate
        # entries whose state letter is in 'acceptable'.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        # Refuse anything outside the repository root.
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        # Make spec relative to the root, as dirstate paths are.
        spec = spec[len(rootdir) :]
        # dirstate stores '/'-separated paths; translate on platforms
        # (e.g. Windows) where os.sep differs.
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.items():
            if f.startswith(spec) and st.state in acceptable:
                if fixpaths:
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                # Without --full, stop at the next path separator after the
                # typed prefix: matches with one become directory candidates.
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # Build the set of acceptable dirstate state letters from the flags;
    # with no flags everything ('nmar') is acceptable.
    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')
2782 2784
2783 2785
@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    # Resolve the two endpoints, restrict to the requested file patterns,
    # and print copies in destination-sorted order.
    src_ctx = scmutil.revsingle(repo, rev1)
    dst_ctx = scmutil.revsingle(repo, rev2)
    matcher = scmutil.match(src_ctx, pats, opts)
    copy_map = copies.pathcopies(src_ctx, dst_ctx, matcher)
    for dest in sorted(copy_map):
        ui.write(b'%s -> %s\n' % (copy_map[dest], dest))
2797 2799
2798 2800
@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Force peer-request logging on; the log lines only show with --debug.
    overrides = {
        (b'devel', b'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

    try:
        # Query the peer first, then emit the report.
        is_local = peer.local() is not None
        pushable = peer.canpush()

        yes, no = _(b'yes'), _(b'no')
        ui.write(_(b'url: %s\n') % peer.url())
        ui.write(_(b'local: %s\n') % (yes if is_local else no))
        ui.write(_(b'pushable: %s\n') % (yes if pushable else no))
    finally:
        # Tear down the connection even if a query or write fails.
        peer.close()
2822 2824
2823 2825
@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    # Replicate the override the real merge machinery would see when
    # --tool is passed, so _picktool below observes the same config.
    overrides = {}
    if opts[b'tool']:
        overrides[(b'ui', b'forcemerge')] = opts[b'tool']
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        # Report (with -v) the other two early-decision inputs:
        # HGMERGE and ui.merge.
        hgmerge = encoding.environ.get(b"HGMERGE")
        if hgmerge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config(b"ui", b"merge")
        if uimerge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts[b'changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            # Suppress _picktool's own error output unless --debug is
            # set, in which case its warnings are part of the answer.
            with ui.silent(
                error=True
            ) if not ui.debugflag else util.nullcontextmanager():
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            ui.write(b'%s = %s\n' % (path, tool))
2908 2910
2909 2911
@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    """access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    """

    peer = hg.peer(ui, {}, repopath)
    try:
        if not keyinfo:
            # Listing mode: dump every key/value pair in the namespace.
            for key, value in sorted(peer.listkeys(namespace).items()):
                ui.write(
                    b"%s\t%s\n"
                    % (stringutil.escapestr(key), stringutil.escapestr(value))
                )
        else:
            # Update mode: compare-and-swap a single key over the wire.
            key, old, new = keyinfo
            args = {
                b'namespace': namespace,
                b'key': key,
                b'old': old,
                b'new': new,
            }
            with peer.commandexecutor() as executor:
                r = executor.callcommand(b'pushkey', args).result()

            ui.status(pycompat.bytestr(r) + b'\n')
            # Exit status 0 on success, 1 on failure.
            return not r
    finally:
        peer.close()
2945 2947
2946 2948
@command(b'debugpvec', [], _(b'A B'))
def debugpvec(ui, repo, a, b=None):
    """compare the position vectors (pvec) of two revisions

    Prints both vectors, their depths, and the relation between them
    (``=`` equal, ``>``/``<`` ancestor ordering, ``|`` divergent).
    """
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = b"="
    elif pa > pb:
        rel = b">"
    elif pa < pb:
        rel = b"<"
    elif pa | pb:
        rel = b"|"
    else:
        # Previously `rel` was left unbound when none of the four
        # comparisons held, making the ui.write below raise
        # UnboundLocalError; report an unknown relation instead.
        rel = b"?"
    ui.write(_(b"a: %s\n") % pa)
    ui.write(_(b"b: %s\n") % pb)
    ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(
        _(b"delta: %d hdist: %d distance: %d relation: %s\n")
        % (
            abs(pa._depth - pb._depth),
            pvec._hamming(pa._vec, pb._vec),
            pa.distance(pb),
            rel,
        )
    )
2973 2975
2974 2976
@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    # Hold the working-copy lock for the whole rebuild so nothing else
    # mutates the dirstate underneath us.
    with repo.wlock():
        dirstate = repo.dirstate
        # changedfiles=None means "rebuild everything".
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            # Files in the manifest but unknown to the dirstate.
            manifestonly = manifestfiles - dirstatefiles
            # Files the dirstate tracks but the manifest does not;
            # keep ones explicitly marked "added" out of the rebuild.
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = {f for f in dsonly if not dirstate.get_entry(f).added}
            changedfiles = manifestonly | dsnotadded

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
3022 3024
3023 3025
@command(
    b'debugrebuildfncache',
    [
        (
            b'',
            b'only-data',
            False,
            _(b'only look for wrong .d files (much faster)'),
        )
    ],
    b'',
)
def debugrebuildfncache(ui, repo, **opts):
    """rebuild the fncache file"""
    # Normalize keyword arguments to bytes keys, then delegate the
    # actual rebuild to the repair module.
    byte_opts = pycompat.byteskwargs(opts)
    repair.rebuildfncache(ui, repo, byte_opts.get(b"only_data"))
3040 3042
3041 3043
@command(
    b'debugrename',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    matcher = scmutil.match(ctx, pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        # filelog().renamed() yields (source path, source node) or a
        # false value when the revision is not a copy/rename.
        rename_info = fctx.filelog().renamed(fctx.filenode())
        display = repo.pathto(path)
        if not rename_info:
            ui.write(_(b"%s not renamed\n") % display)
        else:
            src_path, src_node = rename_info
            ui.write(
                _(b"%s renamed from %s:%s\n")
                % (display, src_path, hex(src_node))
            )
3061 3063
3062 3064
@command(b'debugrequires|debugrequirements', [], b'')
def debugrequirements(ui, repo):
    """print the current repo requirements"""
    # One requirement per line, in sorted order for stable output.
    for requirement in sorted(repo.requirements):
        ui.write(b"%s\n" % requirement)
3068 3070
3069 3071
@command(
    b'debugrevlog',
    cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)

    # --dump mode: print one raw index row per revision and exit.
    if opts.get(b"dump"):
        numrevs = len(r)
        ui.write(
            (
                b"# rev p1rev p2rev start end deltastart base p1 p2"
                b" rawsize totalsize compression heads chainlen\n"
            )
        )
        # ts accumulates raw sizes so "compression" below is the
        # cumulative raw-size / stored-size ratio up to this revision.
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # Full revision: its own delta base is itself.
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # Track current DAG heads incrementally: parents of this
            # revision are no longer heads, this revision now is.
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write(
                b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                b"%11d %5d %8d\n"
                % (
                    rev,
                    p1,
                    p2,
                    r.start(rev),
                    r.end(rev),
                    r.start(dbase),
                    r.start(cbase),
                    r.start(p1),
                    r.start(p2),
                    rs,
                    ts,
                    compression,
                    len(heads),
                    clen,
                )
            )
        return 0

    # Decode the revlog header flags for display.
    format = r._format_version
    v = r._format_flags
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append(b'inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append(b'generaldelta')
    if not flags:
        flags = [b'(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks ways the "delta" are build
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about delta chain of each revs
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision; each entry is a [min, max, total]
    # triple updated by addsize() below (total is later turned into an
    # average in place).
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        # Fold `size` into the [min, max, total] accumulator `l`.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # No delta parent: full snapshot (or empty text).
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            # Delta revision: extend its base's chain bookkeeping.
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                # Intermediate snapshot: delta against another snapshot.
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                # Plain delta; classify by what it is computed against.
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, b'_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            # First byte of a chunk identifies its compression engine.
            chunktype = bytes(segment[0:1])
        else:
            chunktype = b'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    # Turn the running totals into averages, guarding the divisors
    # that can be zero. NOTE(review): numrevs itself is not guarded —
    # an empty revlog would raise ZeroDivisionError below.
    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    if numfull == 0:
        fullsize[2] = 0
    else:
        fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
        for depth in snapsizedepth:
            snaptotal[depth] = snapsizedepth[depth][2]
            snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    # Format-string templates sized to the widest value they print.
    basedfmtstr = b'%%%dd\n'
    basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        # Plain decimal column wide enough for `max`.
        return basedfmtstr % len(str(max))

    def pcfmtstr(max, padding=0):
        # Decimal column plus a "(xx.xx%)" percentage suffix.
        return basepcfmtstr % (len(str(max)), b' ' * padding)

    def pcfmt(value, total):
        # (value, percentage-of-total) pair for pcfmtstr templates.
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.writenoi18n(b'format : %d\n' % format)
    ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))

    ui.write(b'\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
    ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
    ui.writenoi18n(
        b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
    )
    ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
    ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
    ui.writenoi18n(
        b' text : '
        + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b' delta : '
        + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b' lvl-%-3d : ' % depth)
            + fmt % pcfmt(numsnapdepth[depth], numrevs)
        )
    ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
    ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
    ui.writenoi18n(
        b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b' lvl-%-3d : ' % depth)
            + fmt % pcfmt(snaptotal[depth], totalsize)
        )
    ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        # Human-readable label for a chunk's leading compression byte.
        if chunktype == b'empty':
            return b' %s : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return b' 0x%s : ' % hex(chunktype)

    ui.write(b'\n')
    ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write(b'\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
    ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
    ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
    ui.writenoi18n(b'compression ratio : ' + fmt % compratio)

    # Size breakdowns only make sense for formats that store sizes.
    if format > 0:
        ui.write(b'\n')
        ui.writenoi18n(
            b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
            % tuple(datasize)
        )
        ui.writenoi18n(
            b'full revision size (min/max/avg) : %d / %d / %d\n'
            % tuple(fullsize)
        )
        ui.writenoi18n(
            b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
            % tuple(semisize)
        )
        for depth in sorted(snapsizedepth):
            if depth == 0:
                # Depth 0 is already reported as "full revision size".
                continue
            ui.writenoi18n(
                b' level-%-3d (min/max/avg) : %d / %d / %d\n'
                % ((depth,) + tuple(snapsizedepth[depth]))
            )
        ui.writenoi18n(
            b'delta size (min/max/avg) : %d / %d / %d\n'
            % tuple(deltasize)
        )

    if numdeltas > 0:
        ui.write(b'\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.writenoi18n(
            b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
        )
        if numprev > 0:
            ui.writenoi18n(
                b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
            )
            ui.writenoi18n(
                b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
            )
            ui.writenoi18n(
                b' other : ' + fmt2 % pcfmt(numoprev, numprev)
            )
        if gdelta:
            # Parent-delta stats are only meaningful with generaldelta.
            ui.writenoi18n(
                b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
            )
3424 3426
3425 3427
@command(
    b'debugrevlogindex',
    cmdutil.debugrevlogopts
    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
    _(b'[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
    format = opts.get(b'format', 0)
    if format not in (0, 1):
        raise error.Abort(_(b"unknown format %d") % format)

    # --debug prints full node hashes, otherwise the short form.
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        # Measure one rendered node id to size the columns; all ids
        # render to the same width, so the first revision suffices.
        idlen = len(shortfn(r.node(i)))
        break

    # Print the header row matching the chosen format/verbosity.
    if format == 0:
        if ui.verbose:
            ui.writenoi18n(
                b" rev offset length linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
        else:
            ui.writenoi18n(
                b" rev linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
    elif format == 1:
        if ui.verbose:
            ui.writenoi18n(
                (
                    b" rev flag offset length size link p1"
                    b" p2 %s\n"
                )
                % b"nodeid".rjust(idlen)
            )
        else:
            ui.writenoi18n(
                b" rev flag size link p1 p2 %s\n"
                % b"nodeid".rjust(idlen)
            )

    for i in r:
        node = r.node(i)
        if format == 0:
            # Format 0 reports parents as node ids; fall back to null
            # ids if the parents cannot be resolved.
            try:
                pp = r.parents(node)
            except Exception:
                pp = [repo.nullid, repo.nullid]
            if ui.verbose:
                ui.write(
                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
                    % (
                        i,
                        r.start(i),
                        r.length(i),
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
            else:
                ui.write(
                    b"% 6d % 7d %s %s %s\n"
                    % (
                        i,
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
        elif format == 1:
            # Format 1 reports parents as revision numbers and adds
            # the per-revision flag field.
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write(
                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.start(i),
                        r.length(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
            else:
                ui.write(
                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
3539 3541
3540 3542
@command(
    b'debugrevspec',
    [
        (
            b'',
            b'optimize',
            None,
            _(b'print parsed tree after optimizing (DEPRECATED)'),
        ),
        (
            b'',
            b'show-revs',
            True,
            _(b'print list of result revisions (default)'),
        ),
        (
            b's',
            b'show-set',
            None,
            _(b'print internal representation of result set'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
        (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
        (b'', b'verify-optimized', False, _(b'verify optimized result')),
    ],
    b'REVSPEC',
)
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems(b'revsetalias')
    # Ordered pipeline of tree transformations; each stage's output
    # feeds the next one in the loop below.
    stages = [
        (b'parsed', lambda tree: tree),
        (
            b'expanded',
            lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
        ),
        (b'concatenated', revsetlang.foldconcat),
        (b'analyzed', revsetlang.analyze),
        (b'optimized', revsetlang.optimize),
    ]
    if opts[b'no_optimized']:
        # Drop the final 'optimized' stage.
        stages = stages[:-1]
    if opts[b'verify_optimized'] and opts[b'no_optimized']:
        raise error.Abort(
            _(b'cannot use --verify-optimized with --no-optimized')
        )
    stagenames = {n for n, f in stages}

    # Decide which stages get printed: always, or only when the tree
    # actually changed compared to the previously printed one.
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
        showchanged.update([b'expanded', b'concatenated'])
    if opts[b'optimize']:
        showalways.add(b'optimized')
    if opts[b'show_stage'] and opts[b'optimize']:
        raise error.Abort(_(b'cannot use --optimize with --show-stage'))
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(revsetlang.prettyformat(tree), b"\n")
            printedtree = tree

    if opts[b'verify_optimized']:
        # Evaluate both the analyzed and the optimized trees and diff
        # their resulting revision lists; exit 1 on any difference.
        arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
        brevs = revset.makematcher(treebystage[b'optimized'])(repo)
        if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
            ui.writenoi18n(
                b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
            )
            ui.writenoi18n(
                b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
            )
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        # Render the mismatch as a unified-style +/- diff of revisions.
        ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
        ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write(b'-%d\n' % c, label=b'diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write(b'+%d\n' % c, label=b'diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(b' %d\n' % c)
        return 1

    # Normal mode: evaluate the final tree and print the result set.
    func = revset.makematcher(tree)
    revs = func(repo)
    if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
        ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
    if not opts[b'show_revs']:
        return
    for c in revs:
        ui.write(b"%d\n" % c)
3672 3674
3673 3675
@command(
    b'debugserve',
    [
        (
            b'',
            b'sshstdio',
            False,
            _(b'run an SSH server bound to process handles'),
        ),
        (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
        (b'', b'logiofile', b'', _(b'file to log server I/O to')),
    ],
    b'',
)
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts[b'sshstdio']:
        raise error.Abort(_(b'only --sshstdio is currently supported'))

    if opts[b'logiofd'] and opts[b'logiofile']:
        raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))

    logfh = None
    if opts[b'logiofd']:
        # Ideally we would be line buffered. But line buffering in binary
        # mode isn't supported and emits a warning in Python 3.8+. Disabling
        # buffering could have performance impacts. But since this isn't
        # performance critical code, it should be fine.
        fd = int(opts[b'logiofd'])
        try:
            logfh = os.fdopen(fd, 'ab', 0)
        except OSError as err:
            if err.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(fd, 'wb', 0)
    elif opts[b'logiofile']:
        logfh = open(opts[b'logiofile'], b'ab', 0)

    server = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    server.serve_forever()
3722 3724
3723 3725
@command(b'debugsetparents', [], _(b'REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory (DANGEROUS)

    This command is not what you are looking for and should not be used. Using
    this command will most certainly results in slight corruption of the file
    level histories withing your repository. DO NOT USE THIS COMMAND.

    The command update the p1 and p2 field in the dirstate, and not touching
    anything else. This useful for writing repository conversion tools, but
    should be used with extreme care. For example, neither the working
    directory nor the dirstate is updated, so file status may be incorrect
    after running this command. Only used if you are one of the few people that
    deeply unstand both conversion tools and file level histories. If you are
    reading this help, you are not one of this people (most of them sailed west
    from Mithlond anyway.

    So one last time DO NOT USE THIS COMMAND.

    Returns 0 on success.
    """

    # Resolve both revisions up front; a missing second revision
    # defaults to the null revision.
    p1 = scmutil.revsingle(repo, rev1).node()
    p2 = scmutil.revsingle(repo, rev2, b'null').node()

    with repo.wlock():
        repo.setparents(p1, p2)
3751 3753
3752 3754
@command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugsidedata(ui, repo, file_, rev=None, **opts):
    """dump the side data for a cl/manifest/file revision

    Use --verbose to dump the sidedata content."""
    byte_opts = pycompat.byteskwargs(opts)
    wants_internal = (
        byte_opts.get(b'changelog')
        or byte_opts.get(b'manifest')
        or byte_opts.get(b'dir')
    )
    if wants_internal:
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        # With -c/-m/--dir the positional FILE argument is really REV.
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    store = cmdutil.openstorage(repo, b'debugdata', file_, byte_opts)
    # Unwrap to the underlying revlog when the storage object has one.
    store = getattr(store, '_revlog', store)
    try:
        sidedata = store.sidedata(store.lookup(rev))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
    if sidedata:
        entries = sorted(sidedata.items())
        ui.writenoi18n(b'%d sidedata entries\n' % len(entries))
        for key, value in entries:
            ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
            if ui.verbose:
                ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3779 3781
3780 3782
@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    """test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    """
    if not pycompat.iswindows:
        raise error.Abort(
            _(b'certificate chain building is only possible on Windows')
        )

    if not source:
        if not repo:
            raise error.Abort(
                _(
                    b"there is no Mercurial repository here, and no "
                    b"server specified"
                )
            )
        source = b"default"

    source, branches = urlutil.get_unique_pull_path(
        b'debugssl', repo, ui, source
    )
    url = urlutil.url(source)

    defaultport = {b'https': 443, b'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_(b"malformed port number in URL"))
    else:
        raise error.Abort(_(b"only https and ssh connections are supported"))

    from . import win32

    # ssl.wrap_socket() was deprecated in Python 3.7 and removed in 3.12;
    # build an explicit SSLContext instead.  Certificate verification is
    # intentionally disabled: we only want to fetch the peer's certificate
    # chain, not to validate the connection.
    sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS)
    sslcontext.check_hostname = False
    sslcontext.verify_mode = ssl.CERT_NONE
    s = sslcontext.wrap_socket(socket.socket())

    try:
        s.connect(addr)
        # DER-encoded peer certificate, as required by the win32 helpers.
        cert = s.getpeercert(True)

        ui.status(_(b'checking the certificate chain for %s\n') % url.host)

        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_(b'certificate chain is incomplete, updating... '))

            # Second call with build=True asks Windows to fetch the missing
            # intermediates/roots via Windows Update.
            if not win32.checkcertificatechain(cert):
                ui.status(_(b'failed.\n'))
            else:
                ui.status(_(b'done.\n'))
        else:
            ui.status(_(b'full certificate chain is available\n'))
    finally:
        s.close()
3852 3854
3853 3855
@command(
    b"debugbackupbundle",
    [
        (
            b"",
            b"recover",
            b"",
            b"brings the specified changeset back into the repository",
        )
    ]
    + cmdutil.logopts,
    _(b"hg debugbackupbundle [--recover HASH]"),
)
def debugbackupbundle(ui, repo, *pats, **opts):
    """lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in each
    backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for that
    backup.
    """
    # Collect the strip backup bundles, most recently modified first.
    backups = list(
        filter(
            os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
        )
    )
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    opts = pycompat.byteskwargs(opts)
    # Neutral values so the "incoming"-style machinery below does not try to
    # write a bundle of its own or force anything.
    opts[b"bundle"] = b""
    opts[b"force"] = None
    limit = logcmdutil.getlimit(opts)

    def display(other, chlist, displayer):
        # Show up to `limit` changesets from one bundle, honoring the
        # --newest-first and --no-merges log options.
        if opts.get(b"newest_first"):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                True for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b"no_merges") and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get(b"recover")
    if recovernode:
        # Nothing to recover if the changeset is already present.
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_(b"%s already exists in the repo\n") % recovernode)
            return
    elif backups:
        msg = _(
            b"Recover changesets using: hg debugbackupbundle --recover "
            b"<changeset hash>\n\nAvailable backup changesets:"
        )
        ui.status(msg, label=b"status.removed")
    else:
        ui.status(_(b"no backup changesets found\n"))
        return

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = os.path.relpath(backup, encoding.getcwd())
        source, branches = urlutil.get_unique_pull_path(
            b'debugbackupbundle',
            repo,
            ui,
            source,
            default_branches=opts.get(b'branch'),
        )
        try:
            other = hg.peer(repo, opts, source)
        except error.LookupError as ex:
            # Bundle cannot be opened (e.g. its parent was stripped); warn
            # and move on to the next backup.
            msg = _(b"\nwarning: unable to open bundle %s") % source
            hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg, hint=hint)
            continue
        revs, checkout = hg.addbranchrevs(
            repo, other, branches, opts.get(b"rev")
        )

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        # Quietly compute which changesets this bundle would add.
        with ui.silent():
            try:
                other, chlist, cleanupfn = bundlerepo.getremotechanges(
                    ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
                )
            except error.LookupError:
                continue

        try:
            if not chlist:
                continue
            if recovernode:
                # Apply the first bundle that contains the wanted changeset,
                # then stop scanning further backups.
                with repo.lock(), repo.transaction(b"unbundle") as tr:
                    if scmutil.isrevsymbol(other, recovernode):
                        ui.status(_(b"Unbundling %s\n") % (recovernode))
                        f = hg.openpath(ui, source)
                        gen = exchange.readbundle(ui, f, source)
                        if isinstance(gen, bundle2.unbundle20):
                            bundle2.applybundle(
                                repo,
                                gen,
                                tr,
                                source=b"unbundle",
                                url=b"bundle:" + source,
                            )
                        else:
                            gen.apply(repo, b"unbundle", b"bundle:" + source)
                        break
            else:
                # Listing mode: header with the bundle's mtime, then its
                # changesets (full path only with --verbose).
                backupdate = encoding.strtolocal(
                    time.strftime(
                        "%a %H:%M, %Y-%m-%d",
                        time.localtime(os.path.getmtime(source)),
                    )
                )
                ui.status(b"\n%s\n" % (backupdate.ljust(50)))
                if ui.verbose:
                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
                else:
                    opts[
                        b"template"
                    ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
                displayer = logcmdutil.changesetdisplayer(
                    ui, other, opts, False
                )
                display(other, chlist, displayer)
                displayer.close()
        finally:
            cleanupfn()
3994 3996
3995 3997
@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    # Print the subrepository state (path, source, revision) recorded in
    # the given changeset, sorted by path.
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.writenoi18n(b'path %s\n' % path)
        ui.writenoi18n(b' source %s\n' % state[0])
        ui.writenoi18n(b' revision %s\n' % state[1])
4007 4009
4008 4010
@command(b'debugshell', optionalrepo=True)
def debugshell(ui, repo):
    """run an interactive Python interpreter

    The local namespace is provided with a reference to the ui and
    the repo instance (if available).
    """
    import code

    # Expose the command context to the interactive session.
    local_namespace = {'ui': ui, 'repo': repo}
    code.interact(local=local_namespace)
4024 4026
4025 4027
@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    for rev in logcmdutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % bytes(ctx))
        successors_sets = obsutil.successorssets(
            repo, ctx.node(), closest=opts['closest'], cache=cache
        )
        # One indented line per successors set; an empty set prints a bare
        # newline.
        for succs in successors_sets:
            for node in succs:
                ui.write(b' ')
                ui.write(short(node))
            ui.write(b'\n')
4080 4082
4081 4083
@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    fnodes_cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    hgtags_log = repo.file(b'.hgtags')
    for rev in repo:
        node = repo[rev].node()
        fnode = fnodes_cache.getfnode(node, computemissing=False)
        # None means no cache entry; a falsy non-None value is a bad entry.
        if fnode is None:
            display = b'missing'
        elif not fnode:
            display = b'invalid'
        else:
            display = hex(fnode)
            if not hgtags_log.hasnode(fnode):
                display += b' (unknown node)'
        ui.write(b'%d %s %s\n' % (rev, hex(node), display))
4100 4102
4101 4103
@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        # -r needs a repository even though the command itself does not.
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = logcmdutil.revrange(repo, opts['rev'])

    # Parse -D KEY=VALUE definitions into extra template properties
    # ('ui' is reserved and rejected).
    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split(b'=', 1))
            if not k or k == b'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_(b'malformed keyword definition: %s') % d)

    if ui.verbose:
        # Show the parsed template tree, and the alias-expanded tree when
        # template aliases changed it.
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
            )

    if revs is None:
        # Generic template: render once with the -D properties only.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested revision.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
4165 4167
4166 4168
@command(
    b'debuguigetpass',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    response = ui.getpass(prompt)
    # getpass() may return None; display a placeholder in that case.
    if response is None:
        response = b"<default response>"
    ui.writenoi18n(b'response: %s\n' % response)
4181 4183
4182 4184
@command(
    b'debuguiprompt',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    # Echo back whatever the user answered to the prompt.
    response = ui.prompt(prompt)
    ui.writenoi18n(b'response: %s\n' % response)
4195 4197
4196 4198
@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Hold both the working-copy lock and the store lock while refreshing
    # every known cache.
    with repo.wlock():
        with repo.lock():
            repo.updatecaches(caches=repository.CACHES_ALL)
4202 4204
4203 4205
@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
        (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlogs will be upgraded. You can restrict this using flags
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlog but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    * `--filelogs`: optimize the filelogs only
    * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
    """
    # The whole implementation lives in the upgrade module; deduplicate the
    # requested optimizations before handing them over.
    optimizations = set(optimize)
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=optimizations, backup=backup, **opts
    )
4253 4255
4254 4256
@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    matcher = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(matcher), b'\n')
    matched = list(repo[None].walk(matcher))
    if not matched:
        return
    # Honor ui.slash by normalizing displayed relative paths on platforms
    # whose separator is not '/'.
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        display = util.normpath
    else:
        display = lambda fn: fn
    # Pad the columns to the widest repo path and relative path.
    fmt = b'f %%-%ds %%-%ds %%s' % (
        max(len(path) for path in matched),
        max(len(repo.pathto(path)) for path in matched),
    )
    for path in matched:
        line = fmt % (
            path,
            display(repo.pathto(path)),
            b'exact' if matcher.exact(path) else b'',
        )
        ui.write(b"%s\n" % line.rstrip())
4281 4283
4282 4284
@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    ctx = scmutil.revsingle(repo, rev)
    for entry in obsutil.whyunstable(repo, ctx):
        divergent = entry.get(b'divergentnodes')
        if divergent:
            # "hash (phase)" for each divergent node, with a trailing space
            # so it concatenates cleanly into the message below.
            parts = [
                b'%s (%s)' % (dctx.hex(), dctx.phasestr()) for dctx in divergent
            ]
            dnodes = b' '.join(parts) + b' '
        else:
            dnodes = b''
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )
4300 4302
4301 4303
@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    try:
        # Remote-connection options are not wire arguments; drop them and
        # keep only options the user actually set.
        for opt in cmdutil.remoteopts:
            del opts[opt[1]]
        args = {k: v for k, v in opts.items() if v}
        args = pycompat.strkwargs(args)
        # run twice to check that we don't mess up the stream for the next command
        first = peer.debugwireargs(*vals, **args)
        second = peer.debugwireargs(*vals, **args)
        ui.write(b"%s\n" % first)
        if first != second:
            ui.warn(b"%s\n" % second)
    finally:
        peer.close()
4332 4334
4333 4335
def _parsewirelangblocks(fh):
    # Parse the debugwireproto mini language into (action, blocklines)
    # pairs.  An action is an unindented line; its block is the run of
    # indented lines that follows.  A line indented deeper than its
    # predecessor is a continuation and is concatenated onto it.
    action = None
    lines = []
    previndent = 0

    for rawline in fh:
        stripped = rawline.rstrip()
        # Skip blank lines and comments.
        if not stripped or stripped.startswith(b'#'):
            continue

        if not stripped.startswith(b' '):
            # Unindented line begins a new block; emit the previous one.
            if action:
                yield action, lines
            action = stripped
            lines = []
            previndent = 0
            continue

        if not action:
            raise error.Abort(_(b'indented line outside of block'))

        indent = len(stripped) - len(stripped.lstrip())
        if indent > previndent and lines:
            # Deeper indent: continuation of the previous block line.
            lines[-1] += stripped.lstrip()
        else:
            lines.append(stripped)
        previndent = indent

    # Emit the trailing block, if any.
    if action:
        yield action, lines
4374 4376
4375 4377
4376 4378 @command(
4377 4379 b'debugwireproto',
4378 4380 [
4379 4381 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
4380 4382 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
4381 4383 (
4382 4384 b'',
4383 4385 b'noreadstderr',
4384 4386 False,
4385 4387 _(b'do not read from stderr of the remote'),
4386 4388 ),
4387 4389 (
4388 4390 b'',
4389 4391 b'nologhandshake',
4390 4392 False,
4391 4393 _(b'do not log I/O related to the peer handshake'),
4392 4394 ),
4393 4395 ]
4394 4396 + cmdutil.remoteopts,
4395 4397 _(b'[PATH]'),
4396 4398 optionalrepo=True,
4397 4399 )
4398 4400 def debugwireproto(ui, repo, path=None, **opts):
4399 4401 """send wire protocol commands to a server
4400 4402
4401 4403 This command can be used to issue wire protocol commands to remote
4402 4404 peers and to debug the raw data being exchanged.
4403 4405
4404 4406 ``--localssh`` will start an SSH server against the current repository
4405 4407 and connect to that. By default, the connection will perform a handshake
4406 4408 and establish an appropriate peer instance.
4407 4409
4408 4410 ``--peer`` can be used to bypass the handshake protocol and construct a
4409 4411 peer instance using the specified class type. Valid values are ``raw``,
4410 4412 ``ssh1``. ``raw`` instances only allow sending raw data payloads and
4411 4413 don't support higher-level command actions.
4412 4414
4413 4415 ``--noreadstderr`` can be used to disable automatic reading from stderr
4414 4416 of the peer (for SSH connections only). Disabling automatic reading of
4415 4417 stderr is useful for making output more deterministic.
4416 4418
4417 4419 Commands are issued via a mini language which is specified via stdin.
4418 4420 The language consists of individual actions to perform. An action is
4419 4421 defined by a block. A block is defined as a line with no leading
4420 4422 space followed by 0 or more lines with leading space. Blocks are
4421 4423 effectively a high-level command with additional metadata.
4422 4424
4423 4425 Lines beginning with ``#`` are ignored.
4424 4426
4425 4427 The following sections denote available actions.
4426 4428
4427 4429 raw
4428 4430 ---
4429 4431
4430 4432 Send raw data to the server.
4431 4433
4432 4434 The block payload contains the raw data to send as one atomic send
4433 4435 operation. The data may not actually be delivered in a single system
4434 4436 call: it depends on the abilities of the transport being used.
4435 4437
4436 4438 Each line in the block is de-indented and concatenated. Then, that
4437 4439 value is evaluated as a Python b'' literal. This allows the use of
4438 4440 backslash escaping, etc.
4439 4441
4440 4442 raw+
4441 4443 ----
4442 4444
4443 4445 Behaves like ``raw`` except flushes output afterwards.
4444 4446
4445 4447 command <X>
4446 4448 -----------
4447 4449
4448 4450 Send a request to run a named command, whose name follows the ``command``
4449 4451 string.
4450 4452
4451 4453 Arguments to the command are defined as lines in this block. The format of
4452 4454 each line is ``<key> <value>``. e.g.::
4453 4455
4454 4456 command listkeys
4455 4457 namespace bookmarks
4456 4458
4457 4459 If the value begins with ``eval:``, it will be interpreted as a Python
4458 4460 literal expression. Otherwise values are interpreted as Python b'' literals.
4459 4461 This allows sending complex types and encoding special byte sequences via
4460 4462 backslash escaping.
4461 4463
4462 4464 The following arguments have special meaning:
4463 4465
4464 4466 ``PUSHFILE``
4465 4467 When defined, the *push* mechanism of the peer will be used instead
4466 4468 of the static request-response mechanism and the content of the
4467 4469 file specified in the value of this argument will be sent as the
4468 4470 command payload.
4469 4471
4470 4472 This can be used to submit a local bundle file to the remote.
4471 4473
4472 4474 batchbegin
4473 4475 ----------
4474 4476
4475 4477 Instruct the peer to begin a batched send.
4476 4478
4477 4479 All ``command`` blocks are queued for execution until the next
4478 4480 ``batchsubmit`` block.
4479 4481
4480 4482 batchsubmit
4481 4483 -----------
4482 4484
4483 4485 Submit previously queued ``command`` blocks as a batch request.
4484 4486
4485 4487 This action MUST be paired with a ``batchbegin`` action.
4486 4488
4487 4489 httprequest <method> <path>
4488 4490 ---------------------------
4489 4491
4490 4492 (HTTP peer only)
4491 4493
4492 4494 Send an HTTP request to the peer.
4493 4495
4494 4496 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
4495 4497
4496 4498 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
4497 4499 headers to add to the request. e.g. ``Accept: foo``.
4498 4500
4499 4501 The following arguments are special:
4500 4502
4501 4503 ``BODYFILE``
4502 4504 The content of the file defined as the value to this argument will be
4503 4505 transferred verbatim as the HTTP request body.
4504 4506
4505 4507 ``frame <type> <flags> <payload>``
4506 4508 Send a unified protocol frame as part of the request body.
4507 4509
4508 4510 All frames will be collected and sent as the body to the HTTP
4509 4511 request.
4510 4512
4511 4513 close
4512 4514 -----
4513 4515
4514 4516 Close the connection to the server.
4515 4517
4516 4518 flush
4517 4519 -----
4518 4520
4519 4521 Flush data written to the server.
4520 4522
4521 4523 readavailable
4522 4524 -------------
4523 4525
4524 4526 Close the write end of the connection and read all available data from
4525 4527 the server.
4526 4528
4527 4529 If the connection to the server encompasses multiple pipes, we poll both
4528 4530 pipes and read available data.
4529 4531
4530 4532 readline
4531 4533 --------
4532 4534
4533 4535 Read a line of output from the server. If there are multiple output
4534 4536 pipes, reads only the main pipe.
4535 4537
4536 4538 ereadline
4537 4539 ---------
4538 4540
4539 4541 Like ``readline``, but read from the stderr pipe, if available.
4540 4542
4541 4543 read <X>
4542 4544 --------
4543 4545
4544 4546 ``read()`` N bytes from the server's main output pipe.
4545 4547
4546 4548 eread <X>
4547 4549 ---------
4548 4550
4549 4551 ``read()`` N bytes from the server's stderr pipe, if available.
4550 4552
4551 4553 Specifying Unified Frame-Based Protocol Frames
4552 4554 ----------------------------------------------
4553 4555
4554 4556 It is possible to emit a *Unified Frame-Based Protocol* by using special
4555 4557 syntax.
4556 4558
4557 4559 A frame is composed as a type, flags, and payload. These can be parsed
4558 4560 from a string of the form:
4559 4561
4560 4562 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
4561 4563
4562 4564 ``request-id`` and ``stream-id`` are integers defining the request and
4563 4565 stream identifiers.
4564 4566
4565 4567 ``type`` can be an integer value for the frame type or the string name
4566 4568 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
4567 4569 ``command-name``.
4568 4570
4569 4571 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
4570 4572 components. Each component (and there can be just one) can be an integer
4571 4573 or a flag name for stream flags or frame flags, respectively. Values are
4572 4574 resolved to integers and then bitwise OR'd together.
4573 4575
4574 4576 ``payload`` represents the raw frame payload. If it begins with
4575 4577 ``cbor:``, the following string is evaluated as Python code and the
4576 4578 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
4577 4579 as a Python byte string literal.
4578 4580 """
4579 4581 opts = pycompat.byteskwargs(opts)
4580 4582
4581 4583 if opts[b'localssh'] and not repo:
4582 4584 raise error.Abort(_(b'--localssh requires a repository'))
4583 4585
4584 4586 if opts[b'peer'] and opts[b'peer'] not in (
4585 4587 b'raw',
4586 4588 b'ssh1',
4587 4589 ):
4588 4590 raise error.Abort(
4589 4591 _(b'invalid value for --peer'),
4590 4592 hint=_(b'valid values are "raw" and "ssh1"'),
4591 4593 )
4592 4594
4593 4595 if path and opts[b'localssh']:
4594 4596 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4595 4597
4596 4598 if ui.interactive():
4597 4599 ui.write(_(b'(waiting for commands on stdin)\n'))
4598 4600
4599 4601 blocks = list(_parsewirelangblocks(ui.fin))
4600 4602
4601 4603 proc = None
4602 4604 stdin = None
4603 4605 stdout = None
4604 4606 stderr = None
4605 4607 opener = None
4606 4608
4607 4609 if opts[b'localssh']:
4608 4610 # We start the SSH server in its own process so there is process
4609 4611 # separation. This prevents a whole class of potential bugs around
4610 4612 # shared state from interfering with server operation.
4611 4613 args = procutil.hgcmd() + [
4612 4614 b'-R',
4613 4615 repo.root,
4614 4616 b'debugserve',
4615 4617 b'--sshstdio',
4616 4618 ]
4617 4619 proc = subprocess.Popen(
4618 4620 pycompat.rapply(procutil.tonativestr, args),
4619 4621 stdin=subprocess.PIPE,
4620 4622 stdout=subprocess.PIPE,
4621 4623 stderr=subprocess.PIPE,
4622 4624 bufsize=0,
4623 4625 )
4624 4626
4625 4627 stdin = proc.stdin
4626 4628 stdout = proc.stdout
4627 4629 stderr = proc.stderr
4628 4630
4629 4631 # We turn the pipes into observers so we can log I/O.
4630 4632 if ui.verbose or opts[b'peer'] == b'raw':
4631 4633 stdin = util.makeloggingfileobject(
4632 4634 ui, proc.stdin, b'i', logdata=True
4633 4635 )
4634 4636 stdout = util.makeloggingfileobject(
4635 4637 ui, proc.stdout, b'o', logdata=True
4636 4638 )
4637 4639 stderr = util.makeloggingfileobject(
4638 4640 ui, proc.stderr, b'e', logdata=True
4639 4641 )
4640 4642
4641 4643 # --localssh also implies the peer connection settings.
4642 4644
4643 4645 url = b'ssh://localserver'
4644 4646 autoreadstderr = not opts[b'noreadstderr']
4645 4647
4646 4648 if opts[b'peer'] == b'ssh1':
4647 4649 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4648 4650 peer = sshpeer.sshv1peer(
4649 4651 ui,
4650 4652 url,
4651 4653 proc,
4652 4654 stdin,
4653 4655 stdout,
4654 4656 stderr,
4655 4657 None,
4656 4658 autoreadstderr=autoreadstderr,
4657 4659 )
4658 4660 elif opts[b'peer'] == b'raw':
4659 4661 ui.write(_(b'using raw connection to peer\n'))
4660 4662 peer = None
4661 4663 else:
4662 4664 ui.write(_(b'creating ssh peer from handshake results\n'))
4663 4665 peer = sshpeer.makepeer(
4664 4666 ui,
4665 4667 url,
4666 4668 proc,
4667 4669 stdin,
4668 4670 stdout,
4669 4671 stderr,
4670 4672 autoreadstderr=autoreadstderr,
4671 4673 )
4672 4674
4673 4675 elif path:
4674 4676 # We bypass hg.peer() so we can proxy the sockets.
4675 4677 # TODO consider not doing this because we skip
4676 4678 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4677 4679 u = urlutil.url(path)
4678 4680 if u.scheme != b'http':
4679 4681 raise error.Abort(_(b'only http:// paths are currently supported'))
4680 4682
4681 4683 url, authinfo = u.authinfo()
4682 4684 openerargs = {
4683 4685 'useragent': b'Mercurial debugwireproto',
4684 4686 }
4685 4687
4686 4688 # Turn pipes/sockets into observers so we can log I/O.
4687 4689 if ui.verbose:
4688 4690 openerargs.update(
4689 4691 {
4690 4692 'loggingfh': ui,
4691 4693 'loggingname': b's',
4692 4694 'loggingopts': {
4693 4695 'logdata': True,
4694 4696 'logdataapis': False,
4695 4697 },
4696 4698 }
4697 4699 )
4698 4700
4699 4701 if ui.debugflag:
4700 4702 openerargs['loggingopts']['logdataapis'] = True
4701 4703
4702 4704 # Don't send default headers when in raw mode. This allows us to
4703 4705 # bypass most of the behavior of our URL handling code so we can
4704 4706 # have near complete control over what's sent on the wire.
4705 4707 if opts[b'peer'] == b'raw':
4706 4708 openerargs['sendaccept'] = False
4707 4709
4708 4710 opener = urlmod.opener(ui, authinfo, **openerargs)
4709 4711
4710 4712 if opts[b'peer'] == b'raw':
4711 4713 ui.write(_(b'using raw connection to peer\n'))
4712 4714 peer = None
4713 4715 elif opts[b'peer']:
4714 4716 raise error.Abort(
4715 4717 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4716 4718 )
4717 4719 else:
4718 4720 peer = httppeer.makepeer(ui, path, opener=opener)
4719 4721
4720 4722 # We /could/ populate stdin/stdout with sock.makefile()...
4721 4723 else:
4722 4724 raise error.Abort(_(b'unsupported connection configuration'))
4723 4725
4724 4726 batchedcommands = None
4725 4727
4726 4728 # Now perform actions based on the parsed wire language instructions.
4727 4729 for action, lines in blocks:
4728 4730 if action in (b'raw', b'raw+'):
4729 4731 if not stdin:
4730 4732 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4731 4733
4732 4734 # Concatenate the data together.
4733 4735 data = b''.join(l.lstrip() for l in lines)
4734 4736 data = stringutil.unescapestr(data)
4735 4737 stdin.write(data)
4736 4738
4737 4739 if action == b'raw+':
4738 4740 stdin.flush()
4739 4741 elif action == b'flush':
4740 4742 if not stdin:
4741 4743 raise error.Abort(_(b'cannot call flush on this peer'))
4742 4744 stdin.flush()
4743 4745 elif action.startswith(b'command'):
4744 4746 if not peer:
4745 4747 raise error.Abort(
4746 4748 _(
4747 4749 b'cannot send commands unless peer instance '
4748 4750 b'is available'
4749 4751 )
4750 4752 )
4751 4753
4752 4754 command = action.split(b' ', 1)[1]
4753 4755
4754 4756 args = {}
4755 4757 for line in lines:
4756 4758 # We need to allow empty values.
4757 4759 fields = line.lstrip().split(b' ', 1)
4758 4760 if len(fields) == 1:
4759 4761 key = fields[0]
4760 4762 value = b''
4761 4763 else:
4762 4764 key, value = fields
4763 4765
4764 4766 if value.startswith(b'eval:'):
4765 4767 value = stringutil.evalpythonliteral(value[5:])
4766 4768 else:
4767 4769 value = stringutil.unescapestr(value)
4768 4770
4769 4771 args[key] = value
4770 4772
4771 4773 if batchedcommands is not None:
4772 4774 batchedcommands.append((command, args))
4773 4775 continue
4774 4776
4775 4777 ui.status(_(b'sending %s command\n') % command)
4776 4778
4777 4779 if b'PUSHFILE' in args:
4778 4780 with open(args[b'PUSHFILE'], 'rb') as fh:
4779 4781 del args[b'PUSHFILE']
4780 4782 res, output = peer._callpush(
4781 4783 command, fh, **pycompat.strkwargs(args)
4782 4784 )
4783 4785 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4784 4786 ui.status(
4785 4787 _(b'remote output: %s\n') % stringutil.escapestr(output)
4786 4788 )
4787 4789 else:
4788 4790 with peer.commandexecutor() as e:
4789 4791 res = e.callcommand(command, args).result()
4790 4792
4791 4793 ui.status(
4792 4794 _(b'response: %s\n')
4793 4795 % stringutil.pprint(res, bprefix=True, indent=2)
4794 4796 )
4795 4797
4796 4798 elif action == b'batchbegin':
4797 4799 if batchedcommands is not None:
4798 4800 raise error.Abort(_(b'nested batchbegin not allowed'))
4799 4801
4800 4802 batchedcommands = []
4801 4803 elif action == b'batchsubmit':
4802 4804 # There is a batching API we could go through. But it would be
4803 4805 # difficult to normalize requests into function calls. It is easier
4804 4806 # to bypass this layer and normalize to commands + args.
4805 4807 ui.status(
4806 4808 _(b'sending batch with %d sub-commands\n')
4807 4809 % len(batchedcommands)
4808 4810 )
4809 4811 assert peer is not None
4810 4812 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4811 4813 ui.status(
4812 4814 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4813 4815 )
4814 4816
4815 4817 batchedcommands = None
4816 4818
4817 4819 elif action.startswith(b'httprequest '):
4818 4820 if not opener:
4819 4821 raise error.Abort(
4820 4822 _(b'cannot use httprequest without an HTTP peer')
4821 4823 )
4822 4824
4823 4825 request = action.split(b' ', 2)
4824 4826 if len(request) != 3:
4825 4827 raise error.Abort(
4826 4828 _(
4827 4829 b'invalid httprequest: expected format is '
4828 4830 b'"httprequest <method> <path>'
4829 4831 )
4830 4832 )
4831 4833
4832 4834 method, httppath = request[1:]
4833 4835 headers = {}
4834 4836 body = None
4835 4837 frames = []
4836 4838 for line in lines:
4837 4839 line = line.lstrip()
4838 4840 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4839 4841 if m:
4840 4842 # Headers need to use native strings.
4841 4843 key = pycompat.strurl(m.group(1))
4842 4844 value = pycompat.strurl(m.group(2))
4843 4845 headers[key] = value
4844 4846 continue
4845 4847
4846 4848 if line.startswith(b'BODYFILE '):
4847 4849 with open(line.split(b' ', 1), b'rb') as fh:
4848 4850 body = fh.read()
4849 4851 elif line.startswith(b'frame '):
4850 4852 frame = wireprotoframing.makeframefromhumanstring(
4851 4853 line[len(b'frame ') :]
4852 4854 )
4853 4855
4854 4856 frames.append(frame)
4855 4857 else:
4856 4858 raise error.Abort(
4857 4859 _(b'unknown argument to httprequest: %s') % line
4858 4860 )
4859 4861
4860 4862 url = path + httppath
4861 4863
4862 4864 if frames:
4863 4865 body = b''.join(bytes(f) for f in frames)
4864 4866
4865 4867 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4866 4868
4867 4869 # urllib.Request insists on using has_data() as a proxy for
4868 4870 # determining the request method. Override that to use our
4869 4871 # explicitly requested method.
4870 4872 req.get_method = lambda: pycompat.sysstr(method)
4871 4873
4872 4874 try:
4873 4875 res = opener.open(req)
4874 4876 body = res.read()
4875 4877 except util.urlerr.urlerror as e:
4876 4878 # read() method must be called, but only exists in Python 2
4877 4879 getattr(e, 'read', lambda: None)()
4878 4880 continue
4879 4881
4880 4882 ct = res.headers.get('Content-Type')
4881 4883 if ct == 'application/mercurial-cbor':
4882 4884 ui.write(
4883 4885 _(b'cbor> %s\n')
4884 4886 % stringutil.pprint(
4885 4887 cborutil.decodeall(body), bprefix=True, indent=2
4886 4888 )
4887 4889 )
4888 4890
4889 4891 elif action == b'close':
4890 4892 assert peer is not None
4891 4893 peer.close()
4892 4894 elif action == b'readavailable':
4893 4895 if not stdout or not stderr:
4894 4896 raise error.Abort(
4895 4897 _(b'readavailable not available on this peer')
4896 4898 )
4897 4899
4898 4900 stdin.close()
4899 4901 stdout.read()
4900 4902 stderr.read()
4901 4903
4902 4904 elif action == b'readline':
4903 4905 if not stdout:
4904 4906 raise error.Abort(_(b'readline not available on this peer'))
4905 4907 stdout.readline()
4906 4908 elif action == b'ereadline':
4907 4909 if not stderr:
4908 4910 raise error.Abort(_(b'ereadline not available on this peer'))
4909 4911 stderr.readline()
4910 4912 elif action.startswith(b'read '):
4911 4913 count = int(action.split(b' ', 1)[1])
4912 4914 if not stdout:
4913 4915 raise error.Abort(_(b'read not available on this peer'))
4914 4916 stdout.read(count)
4915 4917 elif action.startswith(b'eread '):
4916 4918 count = int(action.split(b' ', 1)[1])
4917 4919 if not stderr:
4918 4920 raise error.Abort(_(b'eread not available on this peer'))
4919 4921 stderr.read(count)
4920 4922 else:
4921 4923 raise error.Abort(_(b'unknown action: %s') % action)
4922 4924
4923 4925 if batchedcommands is not None:
4924 4926 raise error.Abort(_(b'unclosed "batchbegin" request'))
4925 4927
4926 4928 if peer:
4927 4929 peer.close()
4928 4930
4929 4931 if proc:
4930 4932 proc.kill()
@@ -1,2089 +1,2089 b''
1 1 #require no-reposimplestore
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [extensions]
5 5 > share =
6 6 > [format]
7 7 > # stabilize test across variant
8 8 > revlog-compression=zlib
9 9 > [storage]
10 10 > dirstate-v2.slow-path=allow
11 11 > EOF
12 12
13 13 store and revlogv1 are required in source
14 14
15 15 $ hg --config format.usestore=false init no-store
16 16 $ hg -R no-store debugupgraderepo
17 17 abort: cannot upgrade repository; requirement missing: store
18 18 [255]
19 19
20 20 $ hg init no-revlogv1
21 21 $ cat > no-revlogv1/.hg/requires << EOF
22 22 > dotencode
23 23 > fncache
24 24 > generaldelta
25 25 > store
26 26 > EOF
27 27
28 28 $ hg -R no-revlogv1 debugupgraderepo
29 29 abort: cannot upgrade repository; missing a revlog version
30 30 [255]
31 31
32 32 Cannot upgrade shared repositories
33 33
34 34 $ hg init share-parent
35 35 $ hg -R share-parent debugbuilddag -n .+9
36 36 $ hg -R share-parent up tip
37 37 10 files updated, 0 files merged, 0 files removed, 0 files unresolved
38 38 $ hg -q share share-parent share-child
39 39
40 40 $ hg -R share-child debugupgraderepo --config format.sparse-revlog=no
41 41 abort: cannot use these actions on a share repository: sparserevlog
42 42 (upgrade the main repository directly)
43 43 [255]
44 44
45 45 Unless the action is compatible with share
46 46
47 47 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=yes --quiet
48 48 requirements
49 49 preserved: * (glob)
50 50 added: dirstate-v2
51 51
52 52 no revlogs to process
53 53
54 54
55 55 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=yes --quiet --run
56 56 upgrade will perform the following actions:
57 57
58 58 requirements
59 59 preserved: * (glob)
60 60 added: dirstate-v2
61 61
62 62 no revlogs to process
63 63
64 64 $ hg debugformat -R share-child | grep dirstate-v2
65 65 dirstate-v2: yes
66 66 $ hg debugformat -R share-parent | grep dirstate-v2
67 67 dirstate-v2: no
68 68 $ hg status --all -R share-child
69 69 C nf0
70 70 C nf1
71 71 C nf2
72 72 C nf3
73 73 C nf4
74 74 C nf5
75 75 C nf6
76 76 C nf7
77 77 C nf8
78 78 C nf9
79 79 $ hg log -l 3 -R share-child
80 80 changeset: 9:0059eb38e4a4
81 81 tag: tip
82 82 user: debugbuilddag
83 83 date: Thu Jan 01 00:00:09 1970 +0000
84 84 summary: r9
85 85
86 86 changeset: 8:4d5be70c8130
87 87 user: debugbuilddag
88 88 date: Thu Jan 01 00:00:08 1970 +0000
89 89 summary: r8
90 90
91 91 changeset: 7:e60bfe72517e
92 92 user: debugbuilddag
93 93 date: Thu Jan 01 00:00:07 1970 +0000
94 94 summary: r7
95 95
96 96 $ hg status --all -R share-parent
97 97 C nf0
98 98 C nf1
99 99 C nf2
100 100 C nf3
101 101 C nf4
102 102 C nf5
103 103 C nf6
104 104 C nf7
105 105 C nf8
106 106 C nf9
107 107 $ hg log -l 3 -R share-parent
108 108 changeset: 9:0059eb38e4a4
109 109 tag: tip
110 110 user: debugbuilddag
111 111 date: Thu Jan 01 00:00:09 1970 +0000
112 112 summary: r9
113 113
114 114 changeset: 8:4d5be70c8130
115 115 user: debugbuilddag
116 116 date: Thu Jan 01 00:00:08 1970 +0000
117 117 summary: r8
118 118
119 119 changeset: 7:e60bfe72517e
120 120 user: debugbuilddag
121 121 date: Thu Jan 01 00:00:07 1970 +0000
122 122 summary: r7
123 123
124 124
125 125 $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=no --quiet --run
126 126 upgrade will perform the following actions:
127 127
128 128 requirements
129 129 preserved: * (glob)
130 130 removed: dirstate-v2
131 131
132 132 no revlogs to process
133 133
134 134 $ hg debugformat -R share-child | grep dirstate-v2
135 135 dirstate-v2: no
136 136 $ hg debugformat -R share-parent | grep dirstate-v2
137 137 dirstate-v2: no
138 138 $ hg status --all -R share-child
139 139 C nf0
140 140 C nf1
141 141 C nf2
142 142 C nf3
143 143 C nf4
144 144 C nf5
145 145 C nf6
146 146 C nf7
147 147 C nf8
148 148 C nf9
149 149 $ hg log -l 3 -R share-child
150 150 changeset: 9:0059eb38e4a4
151 151 tag: tip
152 152 user: debugbuilddag
153 153 date: Thu Jan 01 00:00:09 1970 +0000
154 154 summary: r9
155 155
156 156 changeset: 8:4d5be70c8130
157 157 user: debugbuilddag
158 158 date: Thu Jan 01 00:00:08 1970 +0000
159 159 summary: r8
160 160
161 161 changeset: 7:e60bfe72517e
162 162 user: debugbuilddag
163 163 date: Thu Jan 01 00:00:07 1970 +0000
164 164 summary: r7
165 165
166 166 $ hg status --all -R share-parent
167 167 C nf0
168 168 C nf1
169 169 C nf2
170 170 C nf3
171 171 C nf4
172 172 C nf5
173 173 C nf6
174 174 C nf7
175 175 C nf8
176 176 C nf9
177 177 $ hg log -l 3 -R share-parent
178 178 changeset: 9:0059eb38e4a4
179 179 tag: tip
180 180 user: debugbuilddag
181 181 date: Thu Jan 01 00:00:09 1970 +0000
182 182 summary: r9
183 183
184 184 changeset: 8:4d5be70c8130
185 185 user: debugbuilddag
186 186 date: Thu Jan 01 00:00:08 1970 +0000
187 187 summary: r8
188 188
189 189 changeset: 7:e60bfe72517e
190 190 user: debugbuilddag
191 191 date: Thu Jan 01 00:00:07 1970 +0000
192 192 summary: r7
193 193
194 194
195 195 Do not yet support upgrading treemanifest repos
196 196
197 197 $ hg --config experimental.treemanifest=true init treemanifest
198 198 $ hg -R treemanifest debugupgraderepo
199 199 abort: cannot upgrade repository; unsupported source requirement: treemanifest
200 200 [255]
201 201
202 202 Cannot add treemanifest requirement during upgrade
203 203
204 204 $ hg init disallowaddedreq
205 205 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
206 206 abort: cannot upgrade repository; do not support adding requirement: treemanifest
207 207 [255]
208 208
209 209 An upgrade of a repository created with recommended settings only suggests optimizations
210 210
211 211 $ hg init empty
212 212 $ cd empty
213 213 $ hg debugformat
214 214 format-variant repo
215 215 fncache: yes
216 216 dirstate-v2: no
217 217 tracked-hint: no
218 218 dotencode: yes
219 219 generaldelta: yes
220 220 share-safe: yes
221 221 sparserevlog: yes
222 222 persistent-nodemap: no (no-rust !)
223 223 persistent-nodemap: yes (rust !)
224 224 copies-sdc: no
225 225 revlog-v2: no
226 226 changelog-v2: no
227 227 plain-cl-delta: yes
228 228 compression: zlib
229 229 compression-level: default
230 230 $ hg debugformat --verbose
231 231 format-variant repo config default
232 232 fncache: yes yes yes
233 233 dirstate-v2: no no no
234 234 tracked-hint: no no no
235 235 dotencode: yes yes yes
236 236 generaldelta: yes yes yes
237 237 share-safe: yes yes yes
238 238 sparserevlog: yes yes yes
239 239 persistent-nodemap: no no no (no-rust !)
240 240 persistent-nodemap: yes yes no (rust !)
241 241 copies-sdc: no no no
242 242 revlog-v2: no no no
243 243 changelog-v2: no no no
244 244 plain-cl-delta: yes yes yes
245 245 compression: zlib zlib zlib (no-zstd !)
246 246 compression: zlib zlib zstd (zstd !)
247 247 compression-level: default default default
248 248 $ hg debugformat --verbose --config format.usefncache=no
249 249 format-variant repo config default
250 250 fncache: yes no yes
251 251 dirstate-v2: no no no
252 252 tracked-hint: no no no
253 253 dotencode: yes no yes
254 254 generaldelta: yes yes yes
255 255 share-safe: yes yes yes
256 256 sparserevlog: yes yes yes
257 257 persistent-nodemap: no no no (no-rust !)
258 258 persistent-nodemap: yes yes no (rust !)
259 259 copies-sdc: no no no
260 260 revlog-v2: no no no
261 261 changelog-v2: no no no
262 262 plain-cl-delta: yes yes yes
263 263 compression: zlib zlib zlib (no-zstd !)
264 264 compression: zlib zlib zstd (zstd !)
265 265 compression-level: default default default
266 266 $ hg debugformat --verbose --config format.usefncache=no --color=debug
267 267 format-variant repo config default
268 268 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
269 269 [formatvariant.name.uptodate|dirstate-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
270 270 [formatvariant.name.uptodate|tracked-hint: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
271 271 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
272 272 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
273 273 [formatvariant.name.uptodate|share-safe: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
274 274 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
275 275 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no] (no-rust !)
276 276 [formatvariant.name.mismatchdefault|persistent-nodemap:][formatvariant.repo.mismatchdefault| yes][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
277 277 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
278 278 [formatvariant.name.uptodate|revlog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
279 279 [formatvariant.name.uptodate|changelog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
280 280 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
281 281 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] (no-zstd !)
282 282 [formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
283 283 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
284 284 $ hg debugformat -Tjson
285 285 [
286 286 {
287 287 "config": true,
288 288 "default": true,
289 289 "name": "fncache",
290 290 "repo": true
291 291 },
292 292 {
293 293 "config": false,
294 294 "default": false,
295 295 "name": "dirstate-v2",
296 296 "repo": false
297 297 },
298 298 {
299 299 "config": false,
300 300 "default": false,
301 301 "name": "tracked-hint",
302 302 "repo": false
303 303 },
304 304 {
305 305 "config": true,
306 306 "default": true,
307 307 "name": "dotencode",
308 308 "repo": true
309 309 },
310 310 {
311 311 "config": true,
312 312 "default": true,
313 313 "name": "generaldelta",
314 314 "repo": true
315 315 },
316 316 {
317 317 "config": true,
318 318 "default": true,
319 319 "name": "share-safe",
320 320 "repo": true
321 321 },
322 322 {
323 323 "config": true,
324 324 "default": true,
325 325 "name": "sparserevlog",
326 326 "repo": true
327 327 },
328 328 {
329 329 "config": false, (no-rust !)
330 330 "config": true, (rust !)
331 331 "default": false,
332 332 "name": "persistent-nodemap",
333 333 "repo": false (no-rust !)
334 334 "repo": true (rust !)
335 335 },
336 336 {
337 337 "config": false,
338 338 "default": false,
339 339 "name": "copies-sdc",
340 340 "repo": false
341 341 },
342 342 {
343 343 "config": false,
344 344 "default": false,
345 345 "name": "revlog-v2",
346 346 "repo": false
347 347 },
348 348 {
349 349 "config": false,
350 350 "default": false,
351 351 "name": "changelog-v2",
352 352 "repo": false
353 353 },
354 354 {
355 355 "config": true,
356 356 "default": true,
357 357 "name": "plain-cl-delta",
358 358 "repo": true
359 359 },
360 360 {
361 361 "config": "zlib",
362 362 "default": "zlib", (no-zstd !)
363 363 "default": "zstd", (zstd !)
364 364 "name": "compression",
365 365 "repo": "zlib"
366 366 },
367 367 {
368 368 "config": "default",
369 369 "default": "default",
370 370 "name": "compression-level",
371 371 "repo": "default"
372 372 }
373 373 ]
374 374 $ hg debugupgraderepo
375 375 (no format upgrades found in existing repository)
376 376 performing an upgrade with "--run" will make the following changes:
377 377
378 378 requirements
379 379 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
380 380 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
381 381
382 382 no revlogs to process
383 383
384 384 additional optimizations are available by specifying "--optimize <name>":
385 385
386 386 re-delta-parent
387 387 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
388 388
389 389 re-delta-multibase
390 390 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
391 391
392 392 re-delta-all
393 393 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
394 394
395 395 re-delta-fulladd
396 396 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
397 397
398 398
399 399 $ hg debugupgraderepo --quiet
400 400 requirements
401 401 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
402 402 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
403 403
404 404 no revlogs to process
405 405
406 406
407 407 --optimize can be used to add optimizations
408 408
409 409 $ hg debugupgrade --optimize 're-delta-parent'
410 410 (no format upgrades found in existing repository)
411 411 performing an upgrade with "--run" will make the following changes:
412 412
413 413 requirements
414 414 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
415 415 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
416 416
417 417 optimisations: re-delta-parent
418 418
419 419 re-delta-parent
420 420 deltas within internal storage will choose a new base revision if needed
421 421
422 422 processed revlogs:
423 423 - all-filelogs
424 424 - changelog
425 425 - manifest
426 426
427 427 additional optimizations are available by specifying "--optimize <name>":
428 428
429 429 re-delta-multibase
430 430 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
431 431
432 432 re-delta-all
433 433 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
434 434
435 435 re-delta-fulladd
436 436 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
437 437
438 438
439 439 modern form of the option
440 440
441 441 $ hg debugupgrade --optimize re-delta-parent
442 442 (no format upgrades found in existing repository)
443 443 performing an upgrade with "--run" will make the following changes:
444 444
445 445 requirements
446 446 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
447 447 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
448 448
449 449 optimisations: re-delta-parent
450 450
451 451 re-delta-parent
452 452 deltas within internal storage will choose a new base revision if needed
453 453
454 454 processed revlogs:
455 455 - all-filelogs
456 456 - changelog
457 457 - manifest
458 458
459 459 additional optimizations are available by specifying "--optimize <name>":
460 460
461 461 re-delta-multibase
462 462 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
463 463
464 464 re-delta-all
465 465 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
466 466
467 467 re-delta-fulladd
468 468 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
469 469
470 470 $ hg debugupgrade --optimize re-delta-parent --quiet
471 471 requirements
472 472 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
473 473 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
474 474
475 475 optimisations: re-delta-parent
476 476
477 477 processed revlogs:
478 478 - all-filelogs
479 479 - changelog
480 480 - manifest
481 481
482 482
483 483 unknown optimization:
484 484
485 485 $ hg debugupgrade --optimize foobar
486 486 abort: unknown optimization action requested: foobar
487 487 (run without arguments to see valid optimizations)
488 488 [255]
489 489
490 490 Various sub-optimal detections work
491 491
492 492 $ cat > .hg/requires << EOF
493 493 > revlogv1
494 494 > store
495 495 > EOF
496 496
497 497 $ hg debugformat
498 498 format-variant repo
499 499 fncache: no
500 500 dirstate-v2: no
501 501 tracked-hint: no
502 502 dotencode: no
503 503 generaldelta: no
504 504 share-safe: no
505 505 sparserevlog: no
506 506 persistent-nodemap: no
507 507 copies-sdc: no
508 508 revlog-v2: no
509 509 changelog-v2: no
510 510 plain-cl-delta: yes
511 511 compression: zlib
512 512 compression-level: default
513 513 $ hg debugformat --verbose
514 514 format-variant repo config default
515 515 fncache: no yes yes
516 516 dirstate-v2: no no no
517 517 tracked-hint: no no no
518 518 dotencode: no yes yes
519 519 generaldelta: no yes yes
520 520 share-safe: no yes yes
521 521 sparserevlog: no yes yes
522 522 persistent-nodemap: no no no (no-rust !)
523 523 persistent-nodemap: no yes no (rust !)
524 524 copies-sdc: no no no
525 525 revlog-v2: no no no
526 526 changelog-v2: no no no
527 527 plain-cl-delta: yes yes yes
528 528 compression: zlib zlib zlib (no-zstd !)
529 529 compression: zlib zlib zstd (zstd !)
530 530 compression-level: default default default
531 531 $ hg debugformat --verbose --config format.usegeneraldelta=no
532 532 format-variant repo config default
533 533 fncache: no yes yes
534 534 dirstate-v2: no no no
535 535 tracked-hint: no no no
536 536 dotencode: no yes yes
537 537 generaldelta: no no yes
538 538 share-safe: no yes yes
539 539 sparserevlog: no no yes
540 540 persistent-nodemap: no no no (no-rust !)
541 541 persistent-nodemap: no yes no (rust !)
542 542 copies-sdc: no no no
543 543 revlog-v2: no no no
544 544 changelog-v2: no no no
545 545 plain-cl-delta: yes yes yes
546 546 compression: zlib zlib zlib (no-zstd !)
547 547 compression: zlib zlib zstd (zstd !)
548 548 compression-level: default default default
549 549 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
550 550 format-variant repo config default
551 551 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
552 552 [formatvariant.name.uptodate|dirstate-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
553 553 [formatvariant.name.uptodate|tracked-hint: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
554 554 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
555 555 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
556 556 [formatvariant.name.mismatchconfig|share-safe: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
557 557 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
558 558 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no] (no-rust !)
559 559 [formatvariant.name.mismatchconfig|persistent-nodemap:][formatvariant.repo.mismatchconfig| no][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
560 560 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
561 561 [formatvariant.name.uptodate|revlog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
562 562 [formatvariant.name.uptodate|changelog-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
563 563 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
564 564 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] (no-zstd !)
565 565 [formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
566 566 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
567 567 $ hg debugupgraderepo
568 568 note: selecting all-filelogs for processing to change: dotencode
569 569 note: selecting all-manifestlogs for processing to change: dotencode
570 570 note: selecting changelog for processing to change: dotencode
571 571
572 572 repository lacks features recommended by current config options:
573 573
574 574 fncache
575 575 long and reserved filenames may not work correctly; repository performance is sub-optimal
576 576
577 577 dotencode
578 578 storage of filenames beginning with a period or space may not work correctly
579 579
580 580 generaldelta
581 581 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
582 582
583 583 share-safe
584 584 old shared repositories do not share source repository requirements and config. This leads to various problems when the source repository format is upgraded or some new extensions are enabled.
585 585
586 586 sparserevlog
587 587 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
588 588
589 589 persistent-nodemap (rust !)
590 590 persist the node -> rev mapping on disk to speedup lookup (rust !)
591 591 (rust !)
592 592
593 593 performing an upgrade with "--run" will make the following changes:
594 594
595 595 requirements
596 596 preserved: revlogv1, store
597 597 added: dotencode, fncache, generaldelta, share-safe, sparserevlog (no-rust !)
598 598 added: dotencode, fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
599 599
600 600 fncache
601 601 repository will be more resilient to storing certain paths and performance of certain operations should be improved
602 602
603 603 dotencode
604 604 repository will be better able to store files beginning with a space or period
605 605
606 606 generaldelta
607 607 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
608 608
609 609 share-safe
610 610 Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
611 611
612 612 sparserevlog
613 613 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
614 614
615 615 persistent-nodemap (rust !)
616 616 Speedup revision lookup by node id. (rust !)
617 617 (rust !)
618 618 processed revlogs:
619 619 - all-filelogs
620 620 - changelog
621 621 - manifest
622 622
623 623 additional optimizations are available by specifying "--optimize <name>":
624 624
625 625 re-delta-parent
626 626 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
627 627
628 628 re-delta-multibase
629 629 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
630 630
631 631 re-delta-all
632 632 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
633 633
634 634 re-delta-fulladd
635 635 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
636 636
637 637 $ hg debugupgraderepo --quiet
638 638 requirements
639 639 preserved: revlogv1, store
640 640 added: dotencode, fncache, generaldelta, share-safe, sparserevlog (no-rust !)
641 641 added: dotencode, fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
642 642
643 643 processed revlogs:
644 644 - all-filelogs
645 645 - changelog
646 646 - manifest
647 647
648 648
649 649 $ hg --config format.dotencode=false debugupgraderepo
650 650 note: selecting all-filelogs for processing to change: fncache
651 651 note: selecting all-manifestlogs for processing to change: fncache
652 652 note: selecting changelog for processing to change: fncache
653 653
654 654 repository lacks features recommended by current config options:
655 655
656 656 fncache
657 657 long and reserved filenames may not work correctly; repository performance is sub-optimal
658 658
659 659 generaldelta
660 660 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
661 661
662 662 share-safe
663 663 old shared repositories do not share source repository requirements and config. This leads to various problems when the source repository format is upgraded or some new extensions are enabled.
664 664
665 665 sparserevlog
666 666 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
667 667
668 668 persistent-nodemap (rust !)
669 669 persist the node -> rev mapping on disk to speedup lookup (rust !)
670 670 (rust !)
671 671 repository lacks features used by the default config options:
672 672
673 673 dotencode
674 674 storage of filenames beginning with a period or space may not work correctly
675 675
676 676
677 677 performing an upgrade with "--run" will make the following changes:
678 678
679 679 requirements
680 680 preserved: revlogv1, store
681 681 added: fncache, generaldelta, share-safe, sparserevlog (no-rust !)
682 682 added: fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
683 683
684 684 fncache
685 685 repository will be more resilient to storing certain paths and performance of certain operations should be improved
686 686
687 687 generaldelta
688 688 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
689 689
690 690 share-safe
691 691 Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
692 692
693 693 sparserevlog
694 694 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
695 695
696 696 persistent-nodemap (rust !)
697 697 Speedup revision lookup by node id. (rust !)
698 698 (rust !)
699 699 processed revlogs:
700 700 - all-filelogs
701 701 - changelog
702 702 - manifest
703 703
704 704 additional optimizations are available by specifying "--optimize <name>":
705 705
706 706 re-delta-parent
707 707 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
708 708
709 709 re-delta-multibase
710 710 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
711 711
712 712 re-delta-all
713 713 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
714 714
715 715 re-delta-fulladd
716 716 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
717 717
718 718
719 719 $ cd ..
720 720
721 721 Upgrading a repository that is already modern essentially no-ops
722 722
723 723 $ hg init modern
724 724 $ hg -R modern debugupgraderepo --run
725 725 nothing to do
726 726
727 727 Upgrading a repository to generaldelta works
728 728
729 729 $ hg --config format.usegeneraldelta=false init upgradegd
730 730 $ cd upgradegd
731 731 $ touch f0
732 732 $ hg -q commit -A -m initial
733 733 $ mkdir FooBarDirectory.d
734 734 $ touch FooBarDirectory.d/f1
735 735 $ hg -q commit -A -m 'add f1'
736 736 $ hg -q up -r 0
737 737 >>> import random
738 738 >>> random.seed(0) # have a reproducible content
739 739 >>> with open("f2", "wb") as f:
740 740 ... for i in range(100000):
741 741 ... f.write(b"%d\n" % random.randint(1000000000, 9999999999)) and None
742 742 $ hg -q commit -A -m 'add f2'
743 743
744 744 make sure we have a .d file
745 745
746 746 $ ls -d .hg/store/data/*
747 747 .hg/store/data/_foo_bar_directory.d.hg
748 748 .hg/store/data/f0.i
749 749 .hg/store/data/f2.d
750 750 .hg/store/data/f2.i
751 751
752 752 $ hg debugupgraderepo --run --config format.sparse-revlog=false
753 753 note: selecting all-filelogs for processing to change: generaldelta
754 754 note: selecting all-manifestlogs for processing to change: generaldelta
755 755 note: selecting changelog for processing to change: generaldelta
756 756
757 757 upgrade will perform the following actions:
758 758
759 759 requirements
760 760 preserved: dotencode, fncache, revlogv1, share-safe, store (no-rust !)
761 761 preserved: dotencode, fncache, persistent-nodemap, revlogv1, share-safe, store (rust !)
762 762 added: generaldelta
763 763
764 764 generaldelta
765 765 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
766 766
767 767 processed revlogs:
768 768 - all-filelogs
769 769 - changelog
770 770 - manifest
771 771
772 772 beginning upgrade...
773 773 repository locked and read-only
774 774 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
775 775 (it is safe to interrupt this process any time before data migration completes)
776 776 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
777 777 migrating 519 KB in store; 1.05 MB tracked data
778 778 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
779 779 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
780 780 migrating 1 manifests containing 3 revisions (384 bytes in store; 238 bytes tracked data)
781 781 finished migrating 3 manifest revisions across 1 manifests; change in size: -17 bytes
782 782 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
783 783 finished migrating 3 changelog revisions; change in size: 0 bytes
784 784 finished migrating 9 total revisions; total change in store size: -17 bytes
785 785 copying phaseroots
786 786 copying requires
787 787 data fully upgraded in a temporary repository
788 788 marking source repository as being upgraded; clients will be unable to read from repository
789 789 starting in-place swap of repository data
790 790 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
791 791 replacing store...
792 792 store replacement complete; repository was inconsistent for *s (glob)
793 793 finalizing requirements file and making repository readable again
794 794 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
795 795 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
796 796 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
797 797
798 798 Original requirements backed up
799 799
800 800 $ cat .hg/upgradebackup.*/requires
801 801 share-safe
802 802 $ cat .hg/upgradebackup.*/store/requires
803 803 dotencode
804 804 fncache
805 805 persistent-nodemap (rust !)
806 806 revlogv1
807 807 store
808 808 upgradeinprogress
809 809
810 810 generaldelta added to original requirements files
811 811
812 812 $ hg debugrequires
813 813 dotencode
814 814 fncache
815 815 generaldelta
816 816 persistent-nodemap (rust !)
817 817 revlogv1
818 818 share-safe
819 819 store
820 820
821 821 store directory has files we expect
822 822
823 823 $ ls .hg/store
824 824 00changelog.i
825 825 00manifest.i
826 826 data
827 827 fncache
828 828 phaseroots
829 829 requires
830 830 undo
831 831 undo.backupfiles
832 832 undo.phaseroots
833 833
834 834 manifest should be generaldelta
835 835
836 836 $ hg debugrevlog -m | grep flags
837 837 flags : inline, generaldelta
838 838
839 839 verify should be happy
840 840
841 841 $ hg verify
842 842 checking changesets
843 843 checking manifests
844 844 crosschecking files in changesets and manifests
845 845 checking files
846 846 checked 3 changesets with 3 changes to 3 files
847 847
848 848 old store should be backed up
849 849
850 850 $ ls -d .hg/upgradebackup.*/
851 851 .hg/upgradebackup.*/ (glob)
852 852 $ ls .hg/upgradebackup.*/store
853 853 00changelog.i
854 854 00manifest.i
855 855 data
856 856 fncache
857 857 phaseroots
858 858 requires
859 859 undo
860 860 undo.backup.fncache
861 861 undo.backupfiles
862 862 undo.phaseroots
863 863
864 864 unless --no-backup is passed
865 865
866 866 $ rm -rf .hg/upgradebackup.*/
867 867 $ hg debugupgraderepo --run --no-backup
868 868 note: selecting all-filelogs for processing to change: sparserevlog
869 869 note: selecting all-manifestlogs for processing to change: sparserevlog
870 870 note: selecting changelog for processing to change: sparserevlog
871 871
872 872 upgrade will perform the following actions:
873 873
874 874 requirements
875 875 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
876 876 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
877 877 added: sparserevlog
878 878
879 879 sparserevlog
880 880 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
881 881
882 882 processed revlogs:
883 883 - all-filelogs
884 884 - changelog
885 885 - manifest
886 886
887 887 beginning upgrade...
888 888 repository locked and read-only
889 889 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
890 890 (it is safe to interrupt this process any time before data migration completes)
891 891 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
892 892 migrating 519 KB in store; 1.05 MB tracked data
893 893 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
894 894 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
895 895 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
896 896 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
897 897 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
898 898 finished migrating 3 changelog revisions; change in size: 0 bytes
899 899 finished migrating 9 total revisions; total change in store size: 0 bytes
900 900 copying phaseroots
901 901 copying requires
902 902 data fully upgraded in a temporary repository
903 903 marking source repository as being upgraded; clients will be unable to read from repository
904 904 starting in-place swap of repository data
905 905 replacing store...
906 906 store replacement complete; repository was inconsistent for * (glob)
907 907 finalizing requirements file and making repository readable again
908 908 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
909 909 $ ls -1 .hg/ | grep upgradebackup
910 910 [1]
911 911
912 912 We can restrict optimization to some revlog:
913 913
914 914 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
915 915 upgrade will perform the following actions:
916 916
917 917 requirements
918 918 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
919 919 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
920 920
921 921 optimisations: re-delta-parent
922 922
923 923 re-delta-parent
924 924 deltas within internal storage will choose a new base revision if needed
925 925
926 926 processed revlogs:
927 927 - manifest
928 928
929 929 beginning upgrade...
930 930 repository locked and read-only
931 931 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
932 932 (it is safe to interrupt this process any time before data migration completes)
933 933 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
934 934 migrating 519 KB in store; 1.05 MB tracked data
935 935 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
936 936 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
937 937 blindly copying data/f0.i containing 1 revisions
938 938 blindly copying data/f2.i containing 1 revisions
939 939 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
940 940 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
941 941 cloning 3 revisions from 00manifest.i
942 942 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
943 943 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
944 944 blindly copying 00changelog.i containing 3 revisions
945 945 finished migrating 3 changelog revisions; change in size: 0 bytes
946 946 finished migrating 9 total revisions; total change in store size: 0 bytes
947 947 copying phaseroots
948 948 copying requires
949 949 data fully upgraded in a temporary repository
950 950 marking source repository as being upgraded; clients will be unable to read from repository
951 951 starting in-place swap of repository data
952 952 replacing store...
953 953 store replacement complete; repository was inconsistent for *s (glob)
954 954 finalizing requirements file and making repository readable again
955 955 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
956 956
957 957 Check that the repo still works fine
958 958
959 959 $ hg log -G --stat
960 960 @ changeset: 2:fca376863211 (py3 !)
961 961 | tag: tip
962 962 | parent: 0:ba592bf28da2
963 963 | user: test
964 964 | date: Thu Jan 01 00:00:00 1970 +0000
965 965 | summary: add f2
966 966 |
967 967 | f2 | 100000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
968 968 | 1 files changed, 100000 insertions(+), 0 deletions(-)
969 969 |
970 970 | o changeset: 1:2029ce2354e2
971 971 |/ user: test
972 972 | date: Thu Jan 01 00:00:00 1970 +0000
973 973 | summary: add f1
974 974 |
975 975 |
976 976 o changeset: 0:ba592bf28da2
977 977 user: test
978 978 date: Thu Jan 01 00:00:00 1970 +0000
979 979 summary: initial
980 980
981 981
982 982
983 983 $ hg verify
984 984 checking changesets
985 985 checking manifests
986 986 crosschecking files in changesets and manifests
987 987 checking files
988 988 checked 3 changesets with 3 changes to 3 files
989 989
990 990 Check we can select negatively
991 991
992 992 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
993 993 upgrade will perform the following actions:
994 994
995 995 requirements
996 996 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
997 997 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
998 998
999 999 optimisations: re-delta-parent
1000 1000
1001 1001 re-delta-parent
1002 1002 deltas within internal storage will choose a new base revision if needed
1003 1003
1004 1004 processed revlogs:
1005 1005 - all-filelogs
1006 1006 - changelog
1007 1007
1008 1008 beginning upgrade...
1009 1009 repository locked and read-only
1010 1010 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1011 1011 (it is safe to interrupt this process any time before data migration completes)
1012 1012 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1013 1013 migrating 519 KB in store; 1.05 MB tracked data
1014 1014 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1015 1015 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1016 1016 cloning 1 revisions from data/f0.i
1017 1017 cloning 1 revisions from data/f2.i
1018 1018 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1019 1019 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1020 1020 blindly copying 00manifest.i containing 3 revisions
1021 1021 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1022 1022 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1023 1023 cloning 3 revisions from 00changelog.i
1024 1024 finished migrating 3 changelog revisions; change in size: 0 bytes
1025 1025 finished migrating 9 total revisions; total change in store size: 0 bytes
1026 1026 copying phaseroots
1027 1027 copying requires
1028 1028 data fully upgraded in a temporary repository
1029 1029 marking source repository as being upgraded; clients will be unable to read from repository
1030 1030 starting in-place swap of repository data
1031 1031 replacing store...
1032 1032 store replacement complete; repository was inconsistent for *s (glob)
1033 1033 finalizing requirements file and making repository readable again
1034 1034 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1035 1035 $ hg verify
1036 1036 checking changesets
1037 1037 checking manifests
1038 1038 crosschecking files in changesets and manifests
1039 1039 checking files
1040 1040 checked 3 changesets with 3 changes to 3 files
1041 1041
1042 1042 Check that we can select changelog only
1043 1043
1044 1044 $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
1045 1045 upgrade will perform the following actions:
1046 1046
1047 1047 requirements
1048 1048 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1049 1049 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1050 1050
1051 1051 optimisations: re-delta-parent
1052 1052
1053 1053 re-delta-parent
1054 1054 deltas within internal storage will choose a new base revision if needed
1055 1055
1056 1056 processed revlogs:
1057 1057 - changelog
1058 1058
1059 1059 beginning upgrade...
1060 1060 repository locked and read-only
1061 1061 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1062 1062 (it is safe to interrupt this process any time before data migration completes)
1063 1063 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1064 1064 migrating 519 KB in store; 1.05 MB tracked data
1065 1065 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1066 1066 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
1067 1067 blindly copying data/f0.i containing 1 revisions
1068 1068 blindly copying data/f2.i containing 1 revisions
1069 1069 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1070 1070 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1071 1071 blindly copying 00manifest.i containing 3 revisions
1072 1072 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1073 1073 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1074 1074 cloning 3 revisions from 00changelog.i
1075 1075 finished migrating 3 changelog revisions; change in size: 0 bytes
1076 1076 finished migrating 9 total revisions; total change in store size: 0 bytes
1077 1077 copying phaseroots
1078 1078 copying requires
1079 1079 data fully upgraded in a temporary repository
1080 1080 marking source repository as being upgraded; clients will be unable to read from repository
1081 1081 starting in-place swap of repository data
1082 1082 replacing store...
1083 1083 store replacement complete; repository was inconsistent for *s (glob)
1084 1084 finalizing requirements file and making repository readable again
1085 1085 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1086 1086 $ hg verify
1087 1087 checking changesets
1088 1088 checking manifests
1089 1089 crosschecking files in changesets and manifests
1090 1090 checking files
1091 1091 checked 3 changesets with 3 changes to 3 files
1092 1092
1093 1093 Check that we can select filelog only
1094 1094
1095 1095 $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
1096 1096 upgrade will perform the following actions:
1097 1097
1098 1098 requirements
1099 1099 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1100 1100 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1101 1101
1102 1102 optimisations: re-delta-parent
1103 1103
1104 1104 re-delta-parent
1105 1105 deltas within internal storage will choose a new base revision if needed
1106 1106
1107 1107 processed revlogs:
1108 1108 - all-filelogs
1109 1109
1110 1110 beginning upgrade...
1111 1111 repository locked and read-only
1112 1112 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1113 1113 (it is safe to interrupt this process any time before data migration completes)
1114 1114 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1115 1115 migrating 519 KB in store; 1.05 MB tracked data
1116 1116 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1117 1117 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1118 1118 cloning 1 revisions from data/f0.i
1119 1119 cloning 1 revisions from data/f2.i
1120 1120 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1121 1121 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1122 1122 blindly copying 00manifest.i containing 3 revisions
1123 1123 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1124 1124 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1125 1125 blindly copying 00changelog.i containing 3 revisions
1126 1126 finished migrating 3 changelog revisions; change in size: 0 bytes
1127 1127 finished migrating 9 total revisions; total change in store size: 0 bytes
1128 1128 copying phaseroots
1129 1129 copying requires
1130 1130 data fully upgraded in a temporary repository
1131 1131 marking source repository as being upgraded; clients will be unable to read from repository
1132 1132 starting in-place swap of repository data
1133 1133 replacing store...
1134 1134 store replacement complete; repository was inconsistent for *s (glob)
1135 1135 finalizing requirements file and making repository readable again
1136 1136 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1137 1137 $ hg verify
1138 1138 checking changesets
1139 1139 checking manifests
1140 1140 crosschecking files in changesets and manifests
1141 1141 checking files
1142 1142 checked 3 changesets with 3 changes to 3 files
1143 1143
1144 1144
1145 1145 Check you can't skip revlog clone during important format downgrade
1146 1146
1147 1147 $ echo "[format]" > .hg/hgrc
1148 1148 $ echo "sparse-revlog=no" >> .hg/hgrc
1149 1149 $ hg debugupgrade --optimize re-delta-parent --no-manifest --no-backup --quiet
1150 1150 warning: ignoring --no-manifest, as upgrade is changing: sparserevlog
1151 1151
1152 1152 requirements
1153 1153 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1154 1154 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1155 1155 removed: sparserevlog
1156 1156
1157 1157 optimisations: re-delta-parent
1158 1158
1159 1159 processed revlogs:
1160 1160 - all-filelogs
1161 1161 - changelog
1162 1162 - manifest
1163 1163
1164 1164 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
1165 1165 note: selecting all-filelogs for processing to change: sparserevlog
1166 1166 note: selecting changelog for processing to change: sparserevlog
1167 1167
1168 1168 upgrade will perform the following actions:
1169 1169
1170 1170 requirements
1171 1171 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1172 1172 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1173 1173 removed: sparserevlog
1174 1174
1175 1175 optimisations: re-delta-parent
1176 1176
1177 1177 re-delta-parent
1178 1178 deltas within internal storage will choose a new base revision if needed
1179 1179
1180 1180 processed revlogs:
1181 1181 - all-filelogs
1182 1182 - changelog
1183 1183 - manifest
1184 1184
1185 1185 beginning upgrade...
1186 1186 repository locked and read-only
1187 1187 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1188 1188 (it is safe to interrupt this process any time before data migration completes)
1189 1189 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1190 1190 migrating 519 KB in store; 1.05 MB tracked data
1191 1191 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1192 1192 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1193 1193 cloning 1 revisions from data/f0.i
1194 1194 cloning 1 revisions from data/f2.i
1195 1195 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1196 1196 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1197 1197 cloning 3 revisions from 00manifest.i
1198 1198 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1199 1199 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1200 1200 cloning 3 revisions from 00changelog.i
1201 1201 finished migrating 3 changelog revisions; change in size: 0 bytes
1202 1202 finished migrating 9 total revisions; total change in store size: 0 bytes
1203 1203 copying phaseroots
1204 1204 copying requires
1205 1205 data fully upgraded in a temporary repository
1206 1206 marking source repository as being upgraded; clients will be unable to read from repository
1207 1207 starting in-place swap of repository data
1208 1208 replacing store...
1209 1209 store replacement complete; repository was inconsistent for *s (glob)
1210 1210 finalizing requirements file and making repository readable again
1211 1211 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1212 1212 $ hg verify
1213 1213 checking changesets
1214 1214 checking manifests
1215 1215 crosschecking files in changesets and manifests
1216 1216 checking files
1217 1217 checked 3 changesets with 3 changes to 3 files
1218 1218
1219 1219 Check you can't skip revlog clone during important format upgrade
1220 1220
1221 1221 $ echo "sparse-revlog=yes" >> .hg/hgrc
1222 1222 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
1223 1223 note: selecting all-filelogs for processing to change: sparserevlog
1224 1224 note: selecting changelog for processing to change: sparserevlog
1225 1225
1226 1226 upgrade will perform the following actions:
1227 1227
1228 1228 requirements
1229 1229 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1230 1230 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1231 1231 added: sparserevlog
1232 1232
1233 1233 optimisations: re-delta-parent
1234 1234
1235 1235 sparserevlog
1236 1236 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
1237 1237
1238 1238 re-delta-parent
1239 1239 deltas within internal storage will choose a new base revision if needed
1240 1240
1241 1241 processed revlogs:
1242 1242 - all-filelogs
1243 1243 - changelog
1244 1244 - manifest
1245 1245
1246 1246 beginning upgrade...
1247 1247 repository locked and read-only
1248 1248 creating temporary repository to stage upgraded data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1249 1249 (it is safe to interrupt this process any time before data migration completes)
1250 1250 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1251 1251 migrating 519 KB in store; 1.05 MB tracked data
1252 1252 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
1253 1253 cloning 1 revisions from data/FooBarDirectory.d/f1.i
1254 1254 cloning 1 revisions from data/f0.i
1255 1255 cloning 1 revisions from data/f2.i
1256 1256 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
1257 1257 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
1258 1258 cloning 3 revisions from 00manifest.i
1259 1259 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1260 1260 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
1261 1261 cloning 3 revisions from 00changelog.i
1262 1262 finished migrating 3 changelog revisions; change in size: 0 bytes
1263 1263 finished migrating 9 total revisions; total change in store size: 0 bytes
1264 1264 copying phaseroots
1265 1265 copying requires
1266 1266 data fully upgraded in a temporary repository
1267 1267 marking source repository as being upgraded; clients will be unable to read from repository
1268 1268 starting in-place swap of repository data
1269 1269 replacing store...
1270 1270 store replacement complete; repository was inconsistent for *s (glob)
1271 1271 finalizing requirements file and making repository readable again
1272 1272 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1273 1273 $ hg verify
1274 1274 checking changesets
1275 1275 checking manifests
1276 1276 crosschecking files in changesets and manifests
1277 1277 checking files
1278 1278 checked 3 changesets with 3 changes to 3 files
1279 1279
1280 1280 $ cd ..
1281 1281
1282 1282 store files with special filenames aren't encoded during copy
1283 1283
1284 1284 $ hg init store-filenames
1285 1285 $ cd store-filenames
1286 1286 $ touch foo
1287 1287 $ hg -q commit -A -m initial
1288 1288 $ touch .hg/store/.XX_special_filename
1289 1289
1290 1290 $ hg debugupgraderepo --run
1291 1291 nothing to do
1292 1292 $ hg debugupgraderepo --run --optimize 're-delta-fulladd'
1293 1293 upgrade will perform the following actions:
1294 1294
1295 1295 requirements
1296 1296 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1297 1297 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1298 1298
1299 1299 optimisations: re-delta-fulladd
1300 1300
1301 1301 re-delta-fulladd
1302 1302 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
1303 1303
1304 1304 processed revlogs:
1305 1305 - all-filelogs
1306 1306 - changelog
1307 1307 - manifest
1308 1308
1309 1309 beginning upgrade...
1310 1310 repository locked and read-only
1311 1311 creating temporary repository to stage upgraded data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1312 1312 (it is safe to interrupt this process any time before data migration completes)
1313 1313 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1314 1314 migrating 301 bytes in store; 107 bytes tracked data
1315 1315 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1316 1316 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1317 1317 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1318 1318 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1319 1319 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1320 1320 finished migrating 1 changelog revisions; change in size: 0 bytes
1321 1321 finished migrating 3 total revisions; total change in store size: 0 bytes
1322 1322 copying .XX_special_filename
1323 1323 copying phaseroots
1324 1324 copying requires
1325 1325 data fully upgraded in a temporary repository
1326 1326 marking source repository as being upgraded; clients will be unable to read from repository
1327 1327 starting in-place swap of repository data
1328 1328 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1329 1329 replacing store...
1330 1330 store replacement complete; repository was inconsistent for *s (glob)
1331 1331 finalizing requirements file and making repository readable again
1332 1332 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1333 1333 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1334 1334 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1335 1335
1336 1336 fncache is valid after upgrade
1337 1337
1338 1338 $ hg debugrebuildfncache
1339 1339 fncache already up to date
1340 1340
1341 1341 $ cd ..
1342 1342
1343 1343 Check upgrading a large file repository
1344 1344 ---------------------------------------
1345 1345
1346 1346 $ hg init largefilesrepo
1347 1347 $ cat << EOF >> largefilesrepo/.hg/hgrc
1348 1348 > [extensions]
1349 1349 > largefiles =
1350 1350 > EOF
1351 1351
1352 1352 $ cd largefilesrepo
1353 1353 $ touch foo
1354 1354 $ hg add --large foo
1355 1355 $ hg -q commit -m initial
1356 1356 $ hg debugrequires
1357 1357 dotencode
1358 1358 fncache
1359 1359 generaldelta
1360 1360 largefiles
1361 1361 persistent-nodemap (rust !)
1362 1362 revlogv1
1363 1363 share-safe
1364 1364 sparserevlog
1365 1365 store
1366 1366
1367 1367 $ hg debugupgraderepo --run
1368 1368 nothing to do
1369 1369 $ hg debugrequires
1370 1370 dotencode
1371 1371 fncache
1372 1372 generaldelta
1373 1373 largefiles
1374 1374 persistent-nodemap (rust !)
1375 1375 revlogv1
1376 1376 share-safe
1377 1377 sparserevlog
1378 1378 store
1379 1379
1380 1380 $ cat << EOF >> .hg/hgrc
1381 1381 > [extensions]
1382 1382 > lfs =
1383 1383 > [lfs]
1384 1384 > threshold = 10
1385 1385 > EOF
1386 1386 $ echo '123456789012345' > lfs.bin
1387 1387 $ hg ci -Am 'lfs.bin'
1388 1388 adding lfs.bin
1389 1389 $ hg debugrequires | grep lfs
1390 1390 lfs
1391 1391 $ find .hg/store/lfs -type f
1392 1392 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1393 1393
1394 1394 $ hg debugupgraderepo --run
1395 1395 nothing to do
1396 1396
1397 1397 $ hg debugrequires | grep lfs
1398 1398 lfs
1399 1399 $ find .hg/store/lfs -type f
1400 1400 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1401 1401 $ hg verify
1402 1402 checking changesets
1403 1403 checking manifests
1404 1404 crosschecking files in changesets and manifests
1405 1405 checking files
1406 1406 checked 2 changesets with 2 changes to 2 files
1407 1407 $ hg debugdata lfs.bin 0
1408 1408 version https://git-lfs.github.com/spec/v1
1409 1409 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1410 1410 size 16
1411 1411 x-is-binary 0
1412 1412
1413 1413 $ cd ..
1414 1414
1415 1415 repository config is taken in account
1416 1416 -------------------------------------
1417 1417
1418 1418 $ cat << EOF >> $HGRCPATH
1419 1419 > [format]
1420 1420 > maxchainlen = 1
1421 1421 > EOF
1422 1422
1423 1423 $ hg init localconfig
1424 1424 $ cd localconfig
1425 1425 $ cat << EOF > file
1426 1426 > some content
1427 1427 > with some length
1428 1428 > to make sure we get a delta
1429 1429 > after changes
1430 1430 > very long
1431 1431 > very long
1432 1432 > very long
1433 1433 > very long
1434 1434 > very long
1435 1435 > very long
1436 1436 > very long
1437 1437 > very long
1438 1438 > very long
1439 1439 > very long
1440 1440 > very long
1441 1441 > EOF
1442 1442 $ hg -q commit -A -m A
1443 1443 $ echo "new line" >> file
1444 1444 $ hg -q commit -m B
1445 1445 $ echo "new line" >> file
1446 1446 $ hg -q commit -m C
1447 1447
1448 1448 $ cat << EOF >> .hg/hgrc
1449 1449 > [format]
1450 1450 > maxchainlen = 9001
1451 1451 > EOF
1452 1452 $ hg config format
1453 1453 format.revlog-compression=$BUNDLE2_COMPRESSIONS$
1454 1454 format.maxchainlen=9001
1455 1455 $ hg debugdeltachain file
1456 1456 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1457 1457 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1458 1458 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1459 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
1459 2 1 2 0 snap 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
1460 1460
1461 1461 $ hg debugupgraderepo --run --optimize 're-delta-all'
1462 1462 upgrade will perform the following actions:
1463 1463
1464 1464 requirements
1465 1465 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1466 1466 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1467 1467
1468 1468 optimisations: re-delta-all
1469 1469
1470 1470 re-delta-all
1471 1471 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
1472 1472
1473 1473 processed revlogs:
1474 1474 - all-filelogs
1475 1475 - changelog
1476 1476 - manifest
1477 1477
1478 1478 beginning upgrade...
1479 1479 repository locked and read-only
1480 1480 creating temporary repository to stage upgraded data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
1481 1481 (it is safe to interrupt this process any time before data migration completes)
1482 1482 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1483 1483 migrating 1019 bytes in store; 882 bytes tracked data
1484 1484 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
1485 1485 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
1486 1486 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
1487 1487 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1488 1488 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
1489 1489 finished migrating 3 changelog revisions; change in size: 0 bytes
1490 1490 finished migrating 9 total revisions; total change in store size: -9 bytes
1491 1491 copying phaseroots
1492 1492 copying requires
1493 1493 data fully upgraded in a temporary repository
1494 1494 marking source repository as being upgraded; clients will be unable to read from repository
1495 1495 starting in-place swap of repository data
1496 1496 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1497 1497 replacing store...
1498 1498 store replacement complete; repository was inconsistent for *s (glob)
1499 1499 finalizing requirements file and making repository readable again
1500 1500 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
1501 1501 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1502 1502 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1503 1503 $ hg debugdeltachain file
1504 1504 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1505 1505 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1506 1506 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1507 1507 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
1508 1508 $ cd ..
1509 1509
1510 1510 $ cat << EOF >> $HGRCPATH
1511 1511 > [format]
1512 1512 > maxchainlen = 9001
1513 1513 > EOF
1514 1514
1515 1515 Check upgrading a sparse-revlog repository
1516 1516 ---------------------------------------
1517 1517
1518 1518 $ hg init sparserevlogrepo --config format.sparse-revlog=no
1519 1519 $ cd sparserevlogrepo
1520 1520 $ touch foo
1521 1521 $ hg add foo
1522 1522 $ hg -q commit -m "foo"
1523 1523 $ hg debugrequires
1524 1524 dotencode
1525 1525 fncache
1526 1526 generaldelta
1527 1527 persistent-nodemap (rust !)
1528 1528 revlogv1
1529 1529 share-safe
1530 1530 store
1531 1531
1532 1532 Check that we can add the sparse-revlog format requirement
1533 1533 $ hg --config format.sparse-revlog=yes debugupgraderepo --run --quiet
1534 1534 upgrade will perform the following actions:
1535 1535
1536 1536 requirements
1537 1537 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1538 1538 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1539 1539 added: sparserevlog
1540 1540
1541 1541 processed revlogs:
1542 1542 - all-filelogs
1543 1543 - changelog
1544 1544 - manifest
1545 1545
1546 1546 $ hg debugrequires
1547 1547 dotencode
1548 1548 fncache
1549 1549 generaldelta
1550 1550 persistent-nodemap (rust !)
1551 1551 revlogv1
1552 1552 share-safe
1553 1553 sparserevlog
1554 1554 store
1555 1555
1556 1556 Check that we can remove the sparse-revlog format requirement
1557 1557 $ hg --config format.sparse-revlog=no debugupgraderepo --run --quiet
1558 1558 upgrade will perform the following actions:
1559 1559
1560 1560 requirements
1561 1561 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1562 1562 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1563 1563 removed: sparserevlog
1564 1564
1565 1565 processed revlogs:
1566 1566 - all-filelogs
1567 1567 - changelog
1568 1568 - manifest
1569 1569
1570 1570 $ hg debugrequires
1571 1571 dotencode
1572 1572 fncache
1573 1573 generaldelta
1574 1574 persistent-nodemap (rust !)
1575 1575 revlogv1
1576 1576 share-safe
1577 1577 store
1578 1578
1579 1579 #if zstd
1580 1580
1581 1581 Check upgrading to a zstd revlog
1582 1582 --------------------------------
1583 1583
1584 1584 upgrade
1585 1585
1586 1586 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup --quiet
1587 1587 upgrade will perform the following actions:
1588 1588
1589 1589 requirements
1590 1590 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
1591 1591 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
1592 1592 added: revlog-compression-zstd, sparserevlog
1593 1593
1594 1594 processed revlogs:
1595 1595 - all-filelogs
1596 1596 - changelog
1597 1597 - manifest
1598 1598
1599 1599 $ hg debugformat -v
1600 1600 format-variant repo config default
1601 1601 fncache: yes yes yes
1602 1602 dirstate-v2: no no no
1603 1603 tracked-hint: no no no
1604 1604 dotencode: yes yes yes
1605 1605 generaldelta: yes yes yes
1606 1606 share-safe: yes yes yes
1607 1607 sparserevlog: yes yes yes
1608 1608 persistent-nodemap: no no no (no-rust !)
1609 1609 persistent-nodemap: yes yes no (rust !)
1610 1610 copies-sdc: no no no
1611 1611 revlog-v2: no no no
1612 1612 changelog-v2: no no no
1613 1613 plain-cl-delta: yes yes yes
1614 1614 compression: zlib zlib zlib (no-zstd !)
1615 1615 compression: zstd zlib zstd (zstd !)
1616 1616 compression-level: default default default
1617 1617 $ hg debugrequires
1618 1618 dotencode
1619 1619 fncache
1620 1620 generaldelta
1621 1621 persistent-nodemap (rust !)
1622 1622 revlog-compression-zstd
1623 1623 revlogv1
1624 1624 share-safe
1625 1625 sparserevlog
1626 1626 store
1627 1627
1628 1628 downgrade
1629 1629
1630 1630 $ hg debugupgraderepo --run --no-backup --quiet
1631 1631 upgrade will perform the following actions:
1632 1632
1633 1633 requirements
1634 1634 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1635 1635 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1636 1636 removed: revlog-compression-zstd
1637 1637
1638 1638 processed revlogs:
1639 1639 - all-filelogs
1640 1640 - changelog
1641 1641 - manifest
1642 1642
1643 1643 $ hg debugformat -v
1644 1644 format-variant repo config default
1645 1645 fncache: yes yes yes
1646 1646 dirstate-v2: no no no
1647 1647 tracked-hint: no no no
1648 1648 dotencode: yes yes yes
1649 1649 generaldelta: yes yes yes
1650 1650 share-safe: yes yes yes
1651 1651 sparserevlog: yes yes yes
1652 1652 persistent-nodemap: no no no (no-rust !)
1653 1653 persistent-nodemap: yes yes no (rust !)
1654 1654 copies-sdc: no no no
1655 1655 revlog-v2: no no no
1656 1656 changelog-v2: no no no
1657 1657 plain-cl-delta: yes yes yes
1658 1658 compression: zlib zlib zlib (no-zstd !)
1659 1659 compression: zlib zlib zstd (zstd !)
1660 1660 compression-level: default default default
1661 1661 $ hg debugrequires
1662 1662 dotencode
1663 1663 fncache
1664 1664 generaldelta
1665 1665 persistent-nodemap (rust !)
1666 1666 revlogv1
1667 1667 share-safe
1668 1668 sparserevlog
1669 1669 store
1670 1670
1671 1671 upgrade from hgrc
1672 1672
1673 1673 $ cat >> .hg/hgrc << EOF
1674 1674 > [format]
1675 1675 > revlog-compression=zstd
1676 1676 > EOF
1677 1677 $ hg debugupgraderepo --run --no-backup --quiet
1678 1678 upgrade will perform the following actions:
1679 1679
1680 1680 requirements
1681 1681 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
1682 1682 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
1683 1683 added: revlog-compression-zstd
1684 1684
1685 1685 processed revlogs:
1686 1686 - all-filelogs
1687 1687 - changelog
1688 1688 - manifest
1689 1689
1690 1690 $ hg debugformat -v
1691 1691 format-variant repo config default
1692 1692 fncache: yes yes yes
1693 1693 dirstate-v2: no no no
1694 1694 tracked-hint: no no no
1695 1695 dotencode: yes yes yes
1696 1696 generaldelta: yes yes yes
1697 1697 share-safe: yes yes yes
1698 1698 sparserevlog: yes yes yes
1699 1699 persistent-nodemap: no no no (no-rust !)
1700 1700 persistent-nodemap: yes yes no (rust !)
1701 1701 copies-sdc: no no no
1702 1702 revlog-v2: no no no
1703 1703 changelog-v2: no no no
1704 1704 plain-cl-delta: yes yes yes
1705 1705 compression: zlib zlib zlib (no-zstd !)
1706 1706 compression: zstd zstd zstd (zstd !)
1707 1707 compression-level: default default default
1708 1708 $ hg debugrequires
1709 1709 dotencode
1710 1710 fncache
1711 1711 generaldelta
1712 1712 persistent-nodemap (rust !)
1713 1713 revlog-compression-zstd
1714 1714 revlogv1
1715 1715 share-safe
1716 1716 sparserevlog
1717 1717 store
1718 1718
1719 1719 #endif
1720 1720
1721 1721 Check upgrading to a revlog format supporting sidedata
1722 1722 ------------------------------------------------------
1723 1723
1724 1724 upgrade
1725 1725
1726 1726 $ hg debugsidedata -c 0
1727 1727 $ hg --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" --quiet
1728 1728 upgrade will perform the following actions:
1729 1729
1730 1730 requirements
1731 1731 preserved: dotencode, fncache, generaldelta, share-safe, store (no-zstd !)
1732 1732 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1733 1733 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1734 1734 removed: revlogv1
1735 1735 added: exp-revlogv2.2 (zstd !)
1736 1736 added: exp-revlogv2.2, sparserevlog (no-zstd !)
1737 1737
1738 1738 processed revlogs:
1739 1739 - all-filelogs
1740 1740 - changelog
1741 1741 - manifest
1742 1742
1743 1743 $ hg debugformat -v
1744 1744 format-variant repo config default
1745 1745 fncache: yes yes yes
1746 1746 dirstate-v2: no no no
1747 1747 tracked-hint: no no no
1748 1748 dotencode: yes yes yes
1749 1749 generaldelta: yes yes yes
1750 1750 share-safe: yes yes yes
1751 1751 sparserevlog: yes yes yes
1752 1752 persistent-nodemap: no no no (no-rust !)
1753 1753 persistent-nodemap: yes yes no (rust !)
1754 1754 copies-sdc: no no no
1755 1755 revlog-v2: yes no no
1756 1756 changelog-v2: no no no
1757 1757 plain-cl-delta: yes yes yes
1758 1758 compression: zlib zlib zlib (no-zstd !)
1759 1759 compression: zstd zstd zstd (zstd !)
1760 1760 compression-level: default default default
1761 1761 $ hg debugrequires
1762 1762 dotencode
1763 1763 exp-revlogv2.2
1764 1764 fncache
1765 1765 generaldelta
1766 1766 persistent-nodemap (rust !)
1767 1767 revlog-compression-zstd (zstd !)
1768 1768 share-safe
1769 1769 sparserevlog
1770 1770 store
1771 1771 $ hg debugsidedata -c 0
1772 1772 2 sidedata entries
1773 1773 entry-0001 size 4
1774 1774 entry-0002 size 32
1775 1775
1776 1776 downgrade
1777 1777
1778 1778 $ hg debugupgraderepo --config experimental.revlogv2=no --run --no-backup --quiet
1779 1779 upgrade will perform the following actions:
1780 1780
1781 1781 requirements
1782 1782 preserved: dotencode, fncache, generaldelta, share-safe, sparserevlog, store (no-zstd !)
1783 1783 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1784 1784 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1785 1785 removed: exp-revlogv2.2
1786 1786 added: revlogv1
1787 1787
1788 1788 processed revlogs:
1789 1789 - all-filelogs
1790 1790 - changelog
1791 1791 - manifest
1792 1792
1793 1793 $ hg debugformat -v
1794 1794 format-variant repo config default
1795 1795 fncache: yes yes yes
1796 1796 dirstate-v2: no no no
1797 1797 tracked-hint: no no no
1798 1798 dotencode: yes yes yes
1799 1799 generaldelta: yes yes yes
1800 1800 share-safe: yes yes yes
1801 1801 sparserevlog: yes yes yes
1802 1802 persistent-nodemap: no no no (no-rust !)
1803 1803 persistent-nodemap: yes yes no (rust !)
1804 1804 copies-sdc: no no no
1805 1805 revlog-v2: no no no
1806 1806 changelog-v2: no no no
1807 1807 plain-cl-delta: yes yes yes
1808 1808 compression: zlib zlib zlib (no-zstd !)
1809 1809 compression: zstd zstd zstd (zstd !)
1810 1810 compression-level: default default default
1811 1811 $ hg debugrequires
1812 1812 dotencode
1813 1813 fncache
1814 1814 generaldelta
1815 1815 persistent-nodemap (rust !)
1816 1816 revlog-compression-zstd (zstd !)
1817 1817 revlogv1
1818 1818 share-safe
1819 1819 sparserevlog
1820 1820 store
1821 1821 $ hg debugsidedata -c 0
1822 1822
1823 1823 upgrade from hgrc
1824 1824
1825 1825 $ cat >> .hg/hgrc << EOF
1826 1826 > [experimental]
1827 1827 > revlogv2=enable-unstable-format-and-corrupt-my-data
1828 1828 > EOF
1829 1829 $ hg debugupgraderepo --run --no-backup --quiet
1830 1830 upgrade will perform the following actions:
1831 1831
1832 1832 requirements
1833 1833 preserved: dotencode, fncache, generaldelta, share-safe, sparserevlog, store (no-zstd !)
1834 1834 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
1835 1835 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
1836 1836 removed: revlogv1
1837 1837 added: exp-revlogv2.2
1838 1838
1839 1839 processed revlogs:
1840 1840 - all-filelogs
1841 1841 - changelog
1842 1842 - manifest
1843 1843
1844 1844 $ hg debugformat -v
1845 1845 format-variant repo config default
1846 1846 fncache: yes yes yes
1847 1847 dirstate-v2: no no no
1848 1848 tracked-hint: no no no
1849 1849 dotencode: yes yes yes
1850 1850 generaldelta: yes yes yes
1851 1851 share-safe: yes yes yes
1852 1852 sparserevlog: yes yes yes
1853 1853 persistent-nodemap: no no no (no-rust !)
1854 1854 persistent-nodemap: yes yes no (rust !)
1855 1855 copies-sdc: no no no
1856 1856 revlog-v2: yes yes no
1857 1857 changelog-v2: no no no
1858 1858 plain-cl-delta: yes yes yes
1859 1859 compression: zlib zlib zlib (no-zstd !)
1860 1860 compression: zstd zstd zstd (zstd !)
1861 1861 compression-level: default default default
1862 1862 $ hg debugrequires
1863 1863 dotencode
1864 1864 exp-revlogv2.2
1865 1865 fncache
1866 1866 generaldelta
1867 1867 persistent-nodemap (rust !)
1868 1868 revlog-compression-zstd (zstd !)
1869 1869 share-safe
1870 1870 sparserevlog
1871 1871 store
1872 1872 $ hg debugsidedata -c 0
1873 1873
1874 1874 Demonstrate that nothing to perform upgrade will still run all the way through
1875 1875
1876 1876 $ hg debugupgraderepo --run
1877 1877 nothing to do
1878 1878
1879 1879 #if no-rust
1880 1880
1881 1881 $ cat << EOF >> $HGRCPATH
1882 1882 > [storage]
1883 1883 > dirstate-v2.slow-path = allow
1884 1884 > EOF
1885 1885
1886 1886 #endif
1887 1887
1888 1888 Upgrade to dirstate-v2
1889 1889
1890 1890 $ hg debugformat -v --config format.use-dirstate-v2=1 | grep dirstate-v2
1891 1891 dirstate-v2: no yes no
1892 1892 $ hg debugupgraderepo --config format.use-dirstate-v2=1 --run
1893 1893 upgrade will perform the following actions:
1894 1894
1895 1895 requirements
1896 1896 preserved: * (glob)
1897 1897 added: dirstate-v2
1898 1898
1899 1899 dirstate-v2
1900 1900 "hg status" will be faster
1901 1901
1902 1902 no revlogs to process
1903 1903
1904 1904 beginning upgrade...
1905 1905 repository locked and read-only
1906 1906 creating temporary repository to stage upgraded data: $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1907 1907 (it is safe to interrupt this process any time before data migration completes)
1908 1908 upgrading to dirstate-v2 from v1
1909 1909 replaced files will be backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1910 1910 removing temporary repository $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1911 1911 $ ls .hg/upgradebackup.*/dirstate
1912 1912 .hg/upgradebackup.*/dirstate (glob)
1913 1913 $ hg debugformat -v | grep dirstate-v2
1914 1914 dirstate-v2: yes no no
1915 1915 $ hg status
1916 1916 $ dd bs=12 count=1 if=.hg/dirstate 2> /dev/null
1917 1917 dirstate-v2
1918 1918
1919 1919 Downgrade from dirstate-v2
1920 1920
1921 1921 $ hg debugupgraderepo --run
1922 1922 upgrade will perform the following actions:
1923 1923
1924 1924 requirements
1925 1925 preserved: * (glob)
1926 1926 removed: dirstate-v2
1927 1927
1928 1928 no revlogs to process
1929 1929
1930 1930 beginning upgrade...
1931 1931 repository locked and read-only
1932 1932 creating temporary repository to stage upgraded data: $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1933 1933 (it is safe to interrupt this process any time before data migration completes)
1934 1934 downgrading from dirstate-v2 to v1
1935 1935 replaced files will be backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
1936 1936 removing temporary repository $TESTTMP/sparserevlogrepo/.hg/upgrade.* (glob)
1937 1937 $ hg debugformat -v | grep dirstate-v2
1938 1938 dirstate-v2: no no no
1939 1939 $ hg status
1940 1940
1941 1941 $ cd ..
1942 1942
1943 1943 dirstate-v2: upgrade and downgrade from and empty repository:
1944 1944 -------------------------------------------------------------
1945 1945
1946 1946 $ hg init --config format.use-dirstate-v2=no dirstate-v2-empty
1947 1947 $ cd dirstate-v2-empty
1948 1948 $ hg debugformat | grep dirstate-v2
1949 1949 dirstate-v2: no
1950 1950
1951 1951 upgrade
1952 1952
1953 1953 $ hg debugupgraderepo --run --config format.use-dirstate-v2=yes
1954 1954 upgrade will perform the following actions:
1955 1955
1956 1956 requirements
1957 1957 preserved: * (glob)
1958 1958 added: dirstate-v2
1959 1959
1960 1960 dirstate-v2
1961 1961 "hg status" will be faster
1962 1962
1963 1963 no revlogs to process
1964 1964
1965 1965 beginning upgrade...
1966 1966 repository locked and read-only
1967 1967 creating temporary repository to stage upgraded data: $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1968 1968 (it is safe to interrupt this process any time before data migration completes)
1969 1969 upgrading to dirstate-v2 from v1
1970 1970 replaced files will be backed up at $TESTTMP/dirstate-v2-empty/.hg/upgradebackup.* (glob)
1971 1971 removing temporary repository $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1972 1972 $ hg debugformat | grep dirstate-v2
1973 1973 dirstate-v2: yes
1974 1974
1975 1975 downgrade
1976 1976
1977 1977 $ hg debugupgraderepo --run --config format.use-dirstate-v2=no
1978 1978 upgrade will perform the following actions:
1979 1979
1980 1980 requirements
1981 1981 preserved: * (glob)
1982 1982 removed: dirstate-v2
1983 1983
1984 1984 no revlogs to process
1985 1985
1986 1986 beginning upgrade...
1987 1987 repository locked and read-only
1988 1988 creating temporary repository to stage upgraded data: $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1989 1989 (it is safe to interrupt this process any time before data migration completes)
1990 1990 downgrading from dirstate-v2 to v1
1991 1991 replaced files will be backed up at $TESTTMP/dirstate-v2-empty/.hg/upgradebackup.* (glob)
1992 1992 removing temporary repository $TESTTMP/dirstate-v2-empty/.hg/upgrade.* (glob)
1993 1993 $ hg debugformat | grep dirstate-v2
1994 1994 dirstate-v2: no
1995 1995
1996 1996 $ cd ..
1997 1997
1998 1998 Test automatic upgrade/downgrade
1999 1999 ================================
2000 2000
2001 2001
2002 2002 For dirstate v2
2003 2003 ---------------
2004 2004
2005 2005 create an initial repository
2006 2006
2007 2007 $ hg init auto-upgrade \
2008 2008 > --config format.use-dirstate-v2=no \
2009 2009 > --config format.use-dirstate-tracked-hint=yes \
2010 2010 > --config format.use-share-safe=no
2011 2011 $ hg debugbuilddag -R auto-upgrade --new-file .+5
2012 2012 $ hg -R auto-upgrade update
2013 2013 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
2014 2014 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2015 2015 dirstate-v2: no
2016 2016
2017 2017 upgrade it to dirstate-v2 automatically
2018 2018
2019 2019 $ hg status -R auto-upgrade \
2020 2020 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2021 2021 > --config format.use-dirstate-v2=yes
2022 2022 automatically upgrading repository to the `dirstate-v2` feature
2023 2023 (see `hg help config.format.use-dirstate-v2` for details)
2024 2024 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2025 2025 dirstate-v2: yes
2026 2026
2027 2027 downgrade it from dirstate-v2 automatically
2028 2028
2029 2029 $ hg status -R auto-upgrade \
2030 2030 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2031 2031 > --config format.use-dirstate-v2=no
2032 2032 automatically downgrading repository from the `dirstate-v2` feature
2033 2033 (see `hg help config.format.use-dirstate-v2` for details)
2034 2034 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2035 2035 dirstate-v2: no
2036 2036
2037 2037
2038 2038 For multiple changes at the same time
2039 2039 ------------------------------------
2040 2040
2041 2041 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2042 2042 dirstate-v2: no
2043 2043 tracked-hint: yes
2044 2044 share-safe: no
2045 2045
2046 2046 $ hg status -R auto-upgrade \
2047 2047 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2048 2048 > --config format.use-dirstate-v2=yes \
2049 2049 > --config format.use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories=yes \
2050 2050 > --config format.use-dirstate-tracked-hint=no\
2051 2051 > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories=yes \
2052 2052 > --config format.use-share-safe=yes
2053 2053 automatically upgrading repository to the `dirstate-v2` feature
2054 2054 (see `hg help config.format.use-dirstate-v2` for details)
2055 2055 automatically upgrading repository to the `share-safe` feature
2056 2056 (see `hg help config.format.use-share-safe` for details)
2057 2057 automatically downgrading repository from the `tracked-hint` feature
2058 2058 (see `hg help config.format.use-dirstate-tracked-hint` for details)
2059 2059 $ hg debugformat -R auto-upgrade | egrep '(dirstate-v2|tracked|share-safe)'
2060 2060 dirstate-v2: yes
2061 2061 tracked-hint: no
2062 2062 share-safe: yes
2063 2063
2064 2064 Attempting Auto-upgrade on a read-only repository
2065 2065 -------------------------------------------------
2066 2066
2067 2067 $ chmod -R a-w auto-upgrade
2068 2068
2069 2069 $ hg status -R auto-upgrade \
2070 2070 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2071 2071 > --config format.use-dirstate-v2=no
2072 2072 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2073 2073 dirstate-v2: yes
2074 2074
2075 2075 $ chmod -R u+w auto-upgrade
2076 2076
2077 2077 Attempting Auto-upgrade on a locked repository
2078 2078 ----------------------------------------------
2079 2079
2080 2080 $ hg -R auto-upgrade debuglock --set-lock --quiet &
2081 2081 $ echo $! >> $DAEMON_PIDS
2082 2082 $ $RUNTESTDIR/testlib/wait-on-file 10 auto-upgrade/.hg/store/lock
2083 2083 $ hg status -R auto-upgrade \
2084 2084 > --config format.use-dirstate-v2.automatic-upgrade-of-mismatching-repositories=yes \
2085 2085 > --config format.use-dirstate-v2=no
2086 2086 $ hg debugformat -R auto-upgrade | grep dirstate-v2
2087 2087 dirstate-v2: yes
2088 2088
2089 2089 $ killdaemons.py
General Comments 0
You need to be logged in to leave comments. Login now