##// END OF EJS Templates
upgrade: drop support for old style optimization names...
Pulkit Goyal -
r46825:083438d6 default
parent child Browse files
Show More
@@ -1,4661 +1,4661 b''
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import codecs
11 11 import collections
12 12 import difflib
13 13 import errno
14 14 import glob
15 15 import operator
16 16 import os
17 17 import platform
18 18 import random
19 19 import re
20 20 import socket
21 21 import ssl
22 22 import stat
23 23 import string
24 24 import subprocess
25 25 import sys
26 26 import time
27 27
28 28 from .i18n import _
29 29 from .node import (
30 30 bin,
31 31 hex,
32 32 nullid,
33 33 nullrev,
34 34 short,
35 35 )
36 36 from .pycompat import (
37 37 getattr,
38 38 open,
39 39 )
40 40 from . import (
41 41 bundle2,
42 42 bundlerepo,
43 43 changegroup,
44 44 cmdutil,
45 45 color,
46 46 context,
47 47 copies,
48 48 dagparser,
49 49 encoding,
50 50 error,
51 51 exchange,
52 52 extensions,
53 53 filemerge,
54 54 filesetlang,
55 55 formatter,
56 56 hg,
57 57 httppeer,
58 58 localrepo,
59 59 lock as lockmod,
60 60 logcmdutil,
61 61 mergestate as mergestatemod,
62 62 metadata,
63 63 obsolete,
64 64 obsutil,
65 65 pathutil,
66 66 phases,
67 67 policy,
68 68 pvec,
69 69 pycompat,
70 70 registrar,
71 71 repair,
72 72 revlog,
73 73 revset,
74 74 revsetlang,
75 75 scmutil,
76 76 setdiscovery,
77 77 simplemerge,
78 78 sshpeer,
79 79 sslutil,
80 80 streamclone,
81 81 strip,
82 82 tags as tagsmod,
83 83 templater,
84 84 treediscovery,
85 85 upgrade,
86 86 url as urlmod,
87 87 util,
88 88 vfs as vfsmod,
89 89 wireprotoframing,
90 90 wireprotoserver,
91 91 wireprotov2peer,
92 92 )
93 93 from .utils import (
94 94 cborutil,
95 95 compression,
96 96 dateutil,
97 97 procutil,
98 98 stringutil,
99 99 )
100 100
101 101 from .revlogutils import (
102 102 deltas as deltautil,
103 103 nodemap,
104 104 sidedata,
105 105 )
106 106
# Convenience alias so callers can release lock objects without going
# through lockmod directly.
release = lockmod.release

# Command table for all debug* commands.  It is seeded with the commands
# registered by the strip module so those are exposed alongside the ones
# defined in this file; @command below registers into this table.
table = {}
table.update(strip.command._table)
command = registrar.command(table)
112 112
113 113
@command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    nargs = len(args)
    if nargs == 3:
        # An explicit index file was given: open it as a standalone revlog.
        index, rev1, rev2 = args
        rl = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = rl.lookup
    elif nargs == 2:
        # No index file: fall back to the changelog of the local repository.
        if not repo:
            raise error.Abort(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        rev1, rev2 = args
        rl = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_(b'either two or three arguments required'))
    ancestor = rl.ancestor(lookup(rev1), lookup(rev2))
    ui.write(b'%d:%s\n' % (rl.rev(ancestor), hex(ancestor)))
133 133
134 134
@command(b'debugantivirusrunning', [])
def debugantivirusrunning(ui, repo):
    """attempt to trigger an antivirus scanner to see if one is active"""
    # vfs paths are bytes throughout Mercurial; the file name was previously
    # passed as a str, inconsistent with every other vfs call in this file.
    with repo.cachevfs.open(b'eicar-test-file.com', b'wb') as f:
        f.write(
            util.b85decode(
                # This is a base85-armored version of the EICAR test file. See
                # https://en.wikipedia.org/wiki/EICAR_test_file for details.
                b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
                b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
            )
        )
    # Give an AV engine time to scan the file.
    time.sleep(2)
    util.unlink(repo.cachevfs.join(b'eicar-test-file.com'))
150 150
151 151
@command(b'debugapplystreamclonebundle', [], b'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    fh = hg.openpath(ui, fname)
    bundle = exchange.readbundle(ui, fh, fname)
    bundle.apply(repo)
158 158
159 159
@command(
    b'debugbuilddag',
    [
        (
            b'm',
            b'mergeable-file',
            None,
            _(b'add single file mergeable changes'),
        ),
        (
            b'o',
            b'overwritten-file',
            None,
            _(b'add single file all revs overwrite'),
        ),
        (b'n', b'new-file', None, _(b'add new file at each rev')),
    ],
    _(b'[OPTION]... [TEXT]'),
)
def debugbuilddag(
    ui,
    repo,
    text=None,
    mergeable_file=False,
    overwritten_file=False,
    new_file=False,
):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_(b"reading DAG from stdin\n"))
        text = ui.fin.read()

    # This command only makes sense on a fresh repository.
    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_(b'repository is not empty'))

    # determine number of revs in DAG (first parse pass, for the progress bar)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == b'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [
            b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
        ]
        initialmergedlines.append(b"")

    tags = []
    progress = ui.makeprogress(
        _(b'building'), unit=_(b'revisions'), total=total
    )
    # Second parse pass: actually create the commits under the locks and a
    # single transaction.
    with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
        at = -1                 # rev id of the last node committed
        atbranch = b'default'   # branch applied to subsequently created nodes
        nodeids = []            # maps dag rev id -> commit node
        id = 0
        progress.update(id)
        for type, data in dagparser.parsedag(text):
            if type == b'n':
                # 'n' event: create one changeset with parents 'ps'
                ui.note((b'node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = b"mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # merge: three-way merge the shared file's contents
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [
                            x[fn].data() for x in (pa, p1, p2)
                        ]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append(b"")
                    elif at > 0:
                        ml = p1[fn].data().split(b"\n")
                    else:
                        ml = initialmergedlines
                    # tag this revision's line so each rev changes the file
                    ml[id * linesperrev] += b" r%i" % id
                    mergedtext = b"\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    # same file rewritten wholesale at every revision
                    fn = b"of"
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id

                if new_file:
                    # a brand new file per revision
                    fn = b"nf%i" % id
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id
                    if len(ps) > 1:
                        # on merges, carry over the nf* files from p2
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith(b"nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    # memctx callback: serve file content from filecontent
                    if path in filecontent:
                        return context.memfilectx(
                            repo, cx, path, filecontent[path]
                        )
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(
                    repo,
                    pars,
                    b"r%i" % id,
                    files,
                    fctxfn,
                    date=(id, 0),
                    user=b"debugbuilddag",
                    extra={b'branch': atbranch},
                )
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == b'l':
                # 'l' event: record a local tag for rev 'id'
                id, name = data
                ui.note((b'tag %s\n' % name))
                tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == b'a':
                # 'a' event: switch the named branch for following nodes
                ui.note((b'branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write(b"localtags", b"".join(tags))
335 335
336 336
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """dump the contents of changegroup unbundler 'gen'

    With 'all' set, every delta chunk of the changelog, manifest, and each
    filelog section is printed with its header fields; otherwise only the
    node of each changelog entry is written.  'indent' spaces prefix every
    output line (used when nested inside bundle2 part output).
    """
    indent_string = b' ' * indent
    if all:
        ui.writenoi18n(
            b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
            % indent_string
        )

        def showchunks(named):
            # dump one section (changelog, manifest, or one filelog)
            ui.write(b"\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write(
                    b"%s%s %s %s %s %s %d\n"
                    % (
                        indent_string,
                        hex(node),
                        hex(p1),
                        hex(p2),
                        hex(cs),
                        hex(deltabase),
                        len(delta),
                    )
                )

        gen.changelogheader()
        showchunks(b"changelog")
        gen.manifestheader()
        showchunks(b"manifest")
        # filelog sections repeat until filelogheader() returns an empty dict
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata[b'filename']
            showchunks(fname)
    else:
        # bundle2 payloads must go through the dedicated debugbundle2 path
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_(b'use debugbundle2 for this file'))
        gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write(b"%s%s\n" % (indent_string, hex(node)))
376 376
377 377
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'

    'part' is a bundle2 part whose payload is an encoded obsolescence
    marker stream; each marker is rendered through the regular
    'debugobsolete' formatter, prefixed by 'indent' spaces.
    """
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = b' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # report the unrecognized encoding version rather than crashing
        msg = b"%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = b"%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        fm = ui.formatter(b'debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()
400 400
401 401
def _debugphaseheads(ui, data, indent=0):
    """display the phase heads encoded in binary phase-heads payload 'data'

    Each head is written as '<hex node> <phase name>', prefixed by 'indent'
    spaces.  (The docstring previously described obsolescence markers; it
    was copy-pasted from _debugobsmarkers.)
    """
    indent_string = b' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write(indent_string)
            ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
410 410
411 411
def _quasirepr(thing):
    """Return a bytes repr of 'thing', with deterministic key order for maps."""
    mapping_types = (dict, util.sortdict, collections.OrderedDict)
    if not isinstance(thing, mapping_types):
        return pycompat.bytestr(repr(thing))
    pairs = [b'%s: %s' % (key, thing[key]) for key in sorted(thing)]
    return b'{%s}' % b', '.join(pairs)
418 418
419 419
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_(b'not a bundle2 file'))
    ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
    parttypes = opts.get('part_type', [])
    for part in gen.iterparts():
        # optional filtering on --part-type
        if parttypes and part.type not in parttypes:
            continue
        msg = b'%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        # a part has exactly one type, so an elif chain is equivalent to the
        # original series of independent checks
        if part.type == b'changegroup':
            version = part.params.get(b'version', b'01')
            cg = changegroup.getunbundler(version, part, b'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        elif part.type == b'obsmarkers' and not ui.quiet:
            _debugobsmarkers(ui, part, indent=4, **opts)
        elif part.type == b'phase-heads' and not ui.quiet:
            _debugphaseheads(ui, part, indent=4)
442 442
443 443
@command(
    b'debugbundle',
    [
        (b'a', b'all', None, _(b'show all details')),
        (b'', b'part-type', [], _(b'show only the named part type')),
        (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
    ],
    _(b'FILE'),
    norepo=True,
)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            # only report the bundlespec, do not dump the contents
            ui.write(b'%s\n' % exchange.getbundlespec(ui, f))
            return
        bundle = exchange.readbundle(ui, f, bundlepath)
        if isinstance(bundle, bundle2.unbundle20):
            return _debugbundle2(ui, bundle, all=all, **opts)
        _debugchangegroup(ui, bundle, all=all, **opts)
466 466
467 467
@command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    ui.writenoi18n(b'Main capabilities:\n')
    for cap in sorted(peer.capabilities()):
        ui.write(b'  %s\n' % cap)
    # bundle2 capabilities are reported separately, nested per key
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.writenoi18n(b'Bundle2 capabilities:\n')
        for key, values in sorted(pycompat.iteritems(b2caps)):
            ui.write(b'  %s\n' % key)
            for value in values:
                ui.write(b'    %s\n' % value)
484 484
485 485
@command(b'debugchangedfiles', [], b'REV')
def debugchangedfiles(ui, repo, rev):
    """list the stored files changes for a revision"""
    ctx = scmutil.revsingle(repo, rev, None)
    sd = repo.changelog.sidedata(ctx.rev())
    files_block = sd.get(sidedata.SD_FILES)
    if files_block is None:
        return
    files = metadata.decode_files_sidedata(sd)
    for f in sorted(files.touched):
        # pick the most specific category; order matters and matches the
        # original added/removed/merged/salvaged priority
        action = b"touched"
        for members, label in (
            (files.added, b"added"),
            (files.removed, b"removed"),
            (files.merged, b"merged"),
            (files.salvaged, b"salvaged"),
        ):
            if f in members:
                action = label
                break

        copy_parent = b""
        copy_source = b""
        if f in files.copied_from_p1:
            copy_parent = b"p1"
            copy_source = files.copied_from_p1[f]
        elif f in files.copied_from_p2:
            copy_parent = b"p2"
            copy_source = files.copied_from_p2[f]

        template = b"%-8s %2s: %s, %s;\n"
        ui.write(template % (action, copy_parent, f, copy_source))
518 518
519 519
@command(b'debugcheckstate', [], b'')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    # forward check: each tracked file's state must be consistent with the
    # parent manifests (states: n=normal, a=added, r=removed, m=merged)
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in b"nr" and f not in m1:
            ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in b"a" and f in m1:
            ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in b"m" and f not in m1 and f not in m2:
            ui.warn(
                _(b"%s in state %s, but not in either manifest\n") % (f, state)
            )
            errors += 1
    # reverse check: every entry of the first parent's manifest must be
    # tracked by the dirstate in an appropriate state
    for f in m1:
        state = repo.dirstate[f]
        if state not in b"nrm":
            ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
548 548
549 549
@command(
    b'debugcolor',
    [(b'', b'style', None, _(b'show all configured styles'))],
    b'hg debugcolor',
)
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
    # --style lists configured styles, otherwise list the raw colors
    if opts.get('style'):
        return _debugdisplaystyle(ui)
    return _debugdisplaycolor(ui)
562 562
563 563
def _debugdisplaycolor(ui):
    """write out every color label the active color mode knows about"""
    # work on a copy so the caller's ui styles are left untouched
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for key, value in ui.configitems(b'color'):
            if key.startswith(b'color.'):
                ui._styles[key] = key[len(b'color.'):]
            elif key.startswith(b'terminfo.'):
                ui._styles[key] = key[len(b'terminfo.'):]
    ui.write(_(b'available colors:\n'))

    # sort labels containing '_' after the others to group '_background'
    # entries together
    def sortkey(item):
        return (b'_' in item[0], item[0], item[1])

    for colorname, label in sorted(ui._styles.items(), key=sortkey):
        ui.write(b'%s\n' % colorname, label=label)
580 580
581 581
def _debugdisplaystyle(ui):
    """write out each configured style label and the effects it maps to"""
    ui.write(_(b'available style:\n'))
    if not ui._styles:
        return
    # pad every label to the widest one so effect lists line up
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write(b'%s' % label, label=label)
        if effects:
            ui.write(b': ')
            ui.write(b' ' * (max(0, width - len(label))))
            ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
        ui.write(b'\n')
595 595
596 596
@command(b'debugcreatestreamclonebundle', [], b'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        # stream bundles cannot exclude revisions, so secret changesets
        # would be exposed; warn rather than refuse
        ui.warn(
            _(
                b'(warning: stream clone bundle will contain secret '
                b'revisions)\n'
            )
        )

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
618 618
619 619
@command(
    b'debugdag',
    [
        (b't', b'tags', None, _(b'use tags as labels')),
        (b'b', b'branches', None, _(b'annotate with branch names')),
        (b'', b'dots', None, _(b'use dots for runs')),
        (b's', b'spaces', None, _(b'separate elements by spaces')),
    ],
    _(b'[OPTION]... [FILE [REV]...]'),
    optionalrepo=True,
)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # standalone revlog index: emit its DAG, labeling the listed revs
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
        revs = {int(r) for r in revs}

        def events():
            # yield ('n', (rev, parents)) for each revision, plus an
            # ('l', (rev, label)) event for explicitly requested revs
            for r in rlog:
                yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
                if r in revs:
                    yield b'l', (r, b"r%i" % r)

    elif repo:
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            # map each tagged rev to its tag names for label events below
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)

        def events():
            # walk the changelog in rev order, emitting branch ('a'),
            # node ('n') and tag-label ('l') events for dagtextlines
            b = b"default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5][b'branch']
                    if newb != b:
                        yield b'a', newb
                        b = newb
                yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield b'l', (r, l)

    else:
        raise error.Abort(_(b'need repo for changelog dag'))

    for line in dagparser.dagtextlines(
        events(),
        addspaces=spaces,
        wraplabels=True,
        wrapannotations=True,
        wrapnonlinear=dots,
        usedots=dots,
        maxlinewidth=70,
    ):
        ui.write(line)
    ui.write(b"\n")
689 689
690 690
@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        # with -c/-m/--dir the positional FILE argument is really the REV
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    try:
        # rawdata: stored revision content, without flag processing applied
        ui.write(r.rawdata(r.lookup(rev)))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
706 706
707 707
@command(
    b'debugdate',
    [(b'e', b'extended', None, _(b'try extended date formats'))],
    _(b'[-e] DATE [RANGE]'),
    norepo=True,
    optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # -e widens the set of accepted date formats
    if opts["extended"]:
        parsed = dateutil.parsedate(date, dateutil.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    ui.writenoi18n(b"internal: %d %d\n" % parsed)
    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(parsed))
    if range:
        # additionally report whether the date falls inside RANGE
        matchfn = dateutil.matchdate(range)
        ui.writenoi18n(b"match: %s\n" % matchfn(parsed[0]))
726 726
727 727
@command(
    b'debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
    (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
    of this revision
    :``extradist``: total size of revisions not part of this delta chain from
    base of delta chain to end of this revision; a measurement
    of how much extra data we need to read/seek across to read
    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
    how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
    (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        # index entry fields used here: e[1]=compressed size,
        # e[2]=uncompressed size, e[3]=delta base rev, e[5]/e[6]=parent revs
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # with generaldelta the base can be any rev: classify which one
            if e[3] == e[5]:
                deltatype = b'p1'
            elif e[3] == e[6]:
                deltatype = b'p2'
            elif e[3] == rev - 1:
                deltatype = b'prev'
            elif e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'other'
        else:
            # without generaldelta, a delta is always against the previous rev
            if e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter(b'debugdeltachain', opts)

    fm.plain(
        b' rev chain# chainlen prev delta '
        b'size rawsize chainsize ratio lindist extradist '
        b'extraratio'
    )
    if withsparseread:
        fm.plain(b' readsize largestblk rddensity srchunks')
    fm.plain(b'\n')

    # delta chains are numbered in order of first appearance of their base
    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        # bytes spanned on disk from the chain base to the end of this rev
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        # guard the ratios against zero denominators (empty revisions)
        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write(
            b'rev chainid chainlen prevrev deltatype compsize '
            b'uncompsize chainsize chainratio lindist extradist '
            b'extraratio',
            b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
            rev,
            chainid,
            len(chain),
            prevrev,
            deltatype,
            comp,
            uncomp,
            chainsize,
            chainratio,
            lineardist,
            extradist,
            extraratio,
            rev=rev,
            chainid=chainid,
            chainlen=len(chain),
            prevrev=prevrev,
            deltatype=deltatype,
            compsize=comp,
            uncompsize=uncomp,
            chainsize=chainsize,
            chainratio=chainratio,
            lindist=lineardist,
            extradist=extradist,
            extraratio=extraratio,
        )
        if withsparseread:
            # simulate a sparse read of the chain to report I/O statistics
            readsize = 0
            largestblock = 0
            srchunks = 0

            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write(
                b'readsize largestblock readdensity srchunks',
                b' %10d %10d %9.5f %8d',
                readsize,
                largestblock,
                readdensity,
                srchunks,
                readsize=readsize,
                largestblock=largestblock,
                readdensity=readdensity,
                srchunks=srchunks,
            )

        fm.plain(b'\n')

    fm.end()
908 908
909 909
@command(
    b'debugdirstate|debugstate',
    [
        (
            b'',
            b'nodates',
            None,
            _(b'do not display the saved mtime (DEPRECATED)'),
        ),
        (b'', b'dates', True, _(b'display the saved mtime')),
        (b'', b'datesort', None, _(b'sort by saved mtime')),
    ],
    _(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = not opts['dates']
    if opts.get('nodates') is not None:
        # deprecated --nodates still takes precedence over --dates
        nodates = True
    datesort = opts.get('datesort')

    if datesort:
        keyfunc = lambda x: (x[1][3], x[0])  # sort by mtime, then by filename
    else:
        keyfunc = None  # sort by filename
    # each dirstate entry 'ent' is (state, mode, size, mtime)
    for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
        if ent[3] == -1:
            timestr = b'unset '
        elif nodates:
            timestr = b'set '
        else:
            timestr = time.strftime(
                "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
            )
            timestr = encoding.strtolocal(timestr)
        if ent[1] & 0o20000:
            # symlink bit set in the recorded mode
            mode = b'lnk'
        else:
            mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    # copy records are listed after the per-file entries
    for f in repo.dirstate.copies():
        ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
953 953
954 954
@command(
    b'debugdiscovery',
    [
        (b'', b'old', None, _(b'use old-style discovery')),
        (
            b'',
            b'nonheads',
            None,
            _(b'use old-style discovery with non-heads included'),
        ),
        (b'', b'rev', [], b'restrict discovery to this set of revs'),
        (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
    ]
    + cmdutil.remoteopts,
    _(b'[--rev REV] [OTHER]'),
)
def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
    """runs the changeset discovery protocol in isolation

    Runs either the legacy tree-based discovery (--old) or the current
    set-based discovery against the peer at ``remoteurl``, then prints a
    summary of the common/missing sets and statistics gathered during the
    exchange.
    """
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(int(opts[b'seed']))

    # audit dictionary: the discovery implementations record statistics in
    # here (it is passed below as audit=data, e.g. b'total-roundtrips'),
    # and more entries are added after the run.
    data = {}
    if opts.get(b'old'):

        def doit(pushedrevs, remoteheads, remote=remote):
            if not util.safehasattr(remote, b'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(
                repo, remote, force=True, audit=data
            )
            common = set(common)
            if not opts.get(b'nonheads'):
                ui.writenoi18n(
                    b"unpruned common: %s\n"
                    % b" ".join(sorted(short(n) for n in common))
                )

            # reduce the common set to its heads, as node ids
            clnode = repo.changelog.node
            common = repo.revs(b'heads(::%ln)', common)
            common = {clnode(r) for r in common}
            return common, hds

    else:

        def doit(pushedrevs, remoteheads, remote=remote):
            # restrict discovery to --rev ancestors when requested
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(
                ui, repo, remote, ancestorsof=nodes, audit=data
            )
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts[b'rev']
    # time only the discovery run itself, not the reporting below
    with util.timedcm('debug-discovery') as t:
        common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    heads_common = set(common)
    heads_remote = set(hds)
    heads_local = set(repo.heads())
    # note: they cannot be a local or remote head that is in common and not
    # itself a head of common.
    heads_common_local = heads_common & heads_local
    heads_common_remote = heads_common & heads_remote
    heads_common_both = heads_common & heads_remote & heads_local

    all = repo.revs(b'all()')
    common = repo.revs(b'::%ln', common)
    roots_common = repo.revs(b'roots(::%ld)', common)
    missing = repo.revs(b'not ::%ld', common)
    heads_missing = repo.revs(b'heads(%ld)', missing)
    roots_missing = repo.revs(b'roots(%ld)', missing)
    # every revision is either common or missing, never both
    assert len(common) + len(missing) == len(all)

    # the set discovery had to decide about: neither ancestors of common
    # remote heads nor descendants of common local heads
    initial_undecided = repo.revs(
        b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
    )
    heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
    roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
    common_initial_undecided = initial_undecided & common
    missing_initial_undecided = initial_undecided & missing

    data[b'elapsed'] = t.elapsed
    data[b'nb-common-heads'] = len(heads_common)
    data[b'nb-common-heads-local'] = len(heads_common_local)
    data[b'nb-common-heads-remote'] = len(heads_common_remote)
    data[b'nb-common-heads-both'] = len(heads_common_both)
    data[b'nb-common-roots'] = len(roots_common)
    data[b'nb-head-local'] = len(heads_local)
    data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
    data[b'nb-head-remote'] = len(heads_remote)
    data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
        heads_common_remote
    )
    data[b'nb-revs'] = len(all)
    data[b'nb-revs-common'] = len(common)
    data[b'nb-revs-missing'] = len(missing)
    data[b'nb-missing-heads'] = len(heads_missing)
    data[b'nb-missing-roots'] = len(roots_missing)
    data[b'nb-ini_und'] = len(initial_undecided)
    data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
    data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
    data[b'nb-ini_und-common'] = len(common_initial_undecided)
    data[b'nb-ini_und-missing'] = len(missing_initial_undecided)

    # display discovery summary
    ui.writenoi18n(b"elapsed time:  %(elapsed)f seconds\n" % data)
    ui.writenoi18n(b"round-trips:           %(total-roundtrips)9d\n" % data)
    ui.writenoi18n(b"heads summary:\n")
    ui.writenoi18n(b"  total common heads:  %(nb-common-heads)9d\n" % data)
    ui.writenoi18n(
        b"    also local heads:  %(nb-common-heads-local)9d\n" % data
    )
    ui.writenoi18n(
        b"    also remote heads: %(nb-common-heads-remote)9d\n" % data
    )
    ui.writenoi18n(b"    both:              %(nb-common-heads-both)9d\n" % data)
    ui.writenoi18n(b"  local heads:         %(nb-head-local)9d\n" % data)
    ui.writenoi18n(
        b"    common:            %(nb-common-heads-local)9d\n" % data
    )
    ui.writenoi18n(
        b"    missing:           %(nb-head-local-missing)9d\n" % data
    )
    ui.writenoi18n(b"  remote heads:        %(nb-head-remote)9d\n" % data)
    ui.writenoi18n(
        b"    common:            %(nb-common-heads-remote)9d\n" % data
    )
    ui.writenoi18n(
        b"    unknown:           %(nb-head-remote-unknown)9d\n" % data
    )
    ui.writenoi18n(b"local changesets:      %(nb-revs)9d\n" % data)
    ui.writenoi18n(b"  common:              %(nb-revs-common)9d\n" % data)
    ui.writenoi18n(b"    heads:             %(nb-common-heads)9d\n" % data)
    ui.writenoi18n(b"    roots:             %(nb-common-roots)9d\n" % data)
    ui.writenoi18n(b"  missing:             %(nb-revs-missing)9d\n" % data)
    ui.writenoi18n(b"    heads:             %(nb-missing-heads)9d\n" % data)
    ui.writenoi18n(b"    roots:             %(nb-missing-roots)9d\n" % data)
    ui.writenoi18n(b"  first undecided set: %(nb-ini_und)9d\n" % data)
    ui.writenoi18n(b"    heads:             %(nb-ini_und-heads)9d\n" % data)
    ui.writenoi18n(b"    roots:             %(nb-ini_und-roots)9d\n" % data)
    ui.writenoi18n(b"    common:            %(nb-ini_und-common)9d\n" % data)
    ui.writenoi18n(b"    missing:           %(nb-ini_und-missing)9d\n" % data)

    if ui.verbose:
        ui.writenoi18n(
            b"common heads: %s\n"
            % b" ".join(sorted(short(n) for n in heads_common))
        )
1113 1113
1114 1114
# I/O buffer size (4 KiB) used by `debugdownload` when streaming data
_chunksize = 4 << 10
1116 1116
1117 1117
@command(
    b'debugdownload',
    [
        (b'o', b'output', b'', _(b'path')),
    ],
    optionalrepo=True,
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config

    The resource is fetched through Mercurial's URL handling layer and
    streamed in ``_chunksize`` chunks either to the --output file or, when
    no output path is given, to the ui output stream.
    """
    fh = urlmod.open(ui, url, output)
    try:
        dest = ui
        if output:
            dest = open(output, b"wb", _chunksize)
        try:
            # stream the content chunk by chunk to bound memory usage
            data = fh.read(_chunksize)
            while data:
                dest.write(data)
                data = fh.read(_chunksize)
        finally:
            if output:
                dest.close()
    finally:
        # always release the source handle (previously leaked on all paths)
        fh.close()
1140 1140
1141 1141
@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', opts)
    loaded = extensions.extensions(ui)
    for name, mod in sorted(loaded, key=operator.itemgetter(0)):
        bundled = extensions.ismoduleinternal(mod)

        # where the extension was loaded from (source file or frozen binary)
        srcpath = None
        if util.safehasattr(mod, '__file__'):
            srcpath = pycompat.fsencode(mod.__file__)
        elif getattr(sys, 'oxidized', False):
            srcpath = pycompat.sysexecutable

        if bundled:
            tested = []  # never expose magic string to users
        else:
            tested = getattr(mod, 'testedwith', b'').split()
        bugurl = getattr(mod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', name)
        else:
            fm.write(b'name', b'%s', name)
            if bundled or hgver in tested:
                fm.plain(b'\n')
            elif not tested:
                fm.plain(_(b' (untested!)\n'))
            else:
                fm.plain(b' (%s!)\n' % tested[-1])

        fm.condwrite(
            ui.verbose and srcpath,
            b'source',
            _(b'  location: %s\n'),
            srcpath or b"",
        )

        if ui.verbose:
            fm.plain(_(b'  bundled: %s\n') % [b'no', b'yes'][bundled])
            fm.data(bundled=bundled)

        fm.condwrite(
            ui.verbose and tested,
            b'testedwith',
            _(b'  tested with: %s\n'),
            fm.formatlist(tested, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and bugurl,
            b'buglink',
            _(b'  bug reporting: %s\n'),
            bugurl or b"",
        )

    fm.end()
1203 1203
1204 1204
@command(
    b'debugfileset',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'apply the filespec on this revision'),
            _(b'REV'),
        ),
        (
            b'',
            b'all-files',
            False,
            _(b'test files from all revisions and working directory'),
        ),
        (
            b's',
            b'show-matcher',
            None,
            _(b'print internal representation of matcher'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
    ],
    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset

    fileset.symbols  # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)

    # processing stages applied in order; each transforms the parse tree
    stages = [
        (b'parsed', pycompat.identity),
        (b'analyzed', filesetlang.analyze),
        (b'optimized', filesetlang.optimize),
    ]
    stagenames = {n for n, f in stages}

    # stages whose intermediate tree should be printed
    showalways = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts[b'show_stage'] or n != b'parsed':
                # the implicit --verbose 'parsed' dump is printed unlabeled
                ui.write(b"* %s:\n" % n)
            ui.write(filesetlang.prettyformat(tree), b"\n")

    # gather the candidate file names the matcher will be evaluated against
    files = set()
    if opts[b'all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts[b'all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(
            repo.dirstate.walk(
                scmutil.matchall(repo),
                subrepos=list(wctx.substate),
                unknown=True,
                ignored=True,
            )
        )
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(repo.getcwd(), expr)
    if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write(b"%s\n" % f)
1300 1300
1301 1301
@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    # column width: widest variant name, but never narrower than the header
    namewidth = max(len(fv.name) for fv in upgrade.allformatvariant)
    namewidth = max(len(b'format-variant'), namewidth)

    def fmtname(name):
        # pad each name so the value columns line up
        return b'%s:' + (b' ' * (namewidth - len(name)))

    fm = ui.formatter(b'debugformat', opts)
    if fm.isplain():

        def render(value):
            # plain output renders booleans as yes/no; strings pass through
            if util.safehasattr(value, b'startswith'):
                return value
            return b'yes' if value else b'no'

    else:
        render = pycompat.identity

    # header row
    fm.plain(b'format-variant')
    fm.plain(b' ' * (namewidth - len(b'format-variant')))
    fm.plain(b' repo')
    if ui.verbose:
        fm.plain(b' config default')
    fm.plain(b'\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # flag entries where the repository disagrees with config or default
        if repovalue != configvalue:
            namelabel = b'formatvariant.name.mismatchconfig'
            repolabel = b'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = b'formatvariant.name.mismatchdefault'
            repolabel = b'formatvariant.repo.mismatchdefault'
        else:
            namelabel = b'formatvariant.name.uptodate'
            repolabel = b'formatvariant.repo.uptodate'

        fm.write(b'name', fmtname(fv.name), fv.name, label=namelabel)
        fm.write(b'repo', b' %3s', render(repovalue), label=repolabel)
        if fv.default != configvalue:
            configlabel = b'formatvariant.config.special'
        else:
            configlabel = b'formatvariant.config.default'
        fm.condwrite(
            ui.verbose,
            b'config',
            b' %6s',
            render(configvalue),
            label=configlabel,
        )
        fm.condwrite(
            ui.verbose,
            b'default',
            b' %7s',
            render(fv.default),
            label=b'formatvariant.default',
        )
        fm.plain(b'\n')
    fm.end()
1372 1372
1373 1373
@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""

    def yesno(flag):
        # render a truthy probe result as bytes for the report
        return b'yes' if flag else b'no'

    ui.writenoi18n(b'path: %s\n' % path)
    ui.writenoi18n(
        b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
    )
    ui.writenoi18n(b'exec: %s\n' % yesno(util.checkexec(path)))
    ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
    ui.writenoi18n(b'symlink: %s\n' % yesno(util.checklink(path)))
    ui.writenoi18n(b'hardlink: %s\n' % yesno(util.checknlink(path)))
    # probing case sensitivity needs a scratch file; tolerate failures
    casesensitive = b'(unknown)'
    try:
        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
            casesensitive = yesno(util.fscasesensitive(f.name))
    except OSError:
        pass
    ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1396 1396
1397 1397
@command(
    b'debuggetbundle',
    [
        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
        (
            b't',
            b'type',
            b'bzip2',
            _(b'bundle compression type to use'),
            _(b'TYPE'),
        ),
    ],
    _(b'REPO FILE [-H|-C ID]...'),
    norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable(b'getbundle'):
        raise error.Abort(b"getbundle() not supported by target repository")

    # TODO: get desired bundlecaps from command line.
    kwargs = {'bundlecaps': None}
    if common:
        kwargs['common'] = [bin(s) for s in common]
    if head:
        kwargs['heads'] = [bin(s) for s in head]
    bundle = peer.getbundle(b'debug', **kwargs)

    # map the user-facing compression name to the internal bundle type
    btypes = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }
    bundletype = btypes.get(opts.get(b'type', b'bzip2').lower())
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_(b'unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1444 1444
1445 1445
@command(b'debugignore', [], b'[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write(b"%s\n" % pycompat.byterepr(ignore))
        return

    m = scmutil.match(repo[None], pats=files)
    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for f in m.files():
        nf = util.normpath(f)
        ignored = None
        ignoredata = None
        if nf != b'.':
            if ignore(nf):
                # the file itself matches an ignore pattern
                ignored = nf
                ignoredata = repo.dirstate._ignorefileandline(nf)
            else:
                # otherwise look for an ignored parent directory
                for p in pathutil.finddirs(nf):
                    if ignore(p):
                        ignored = p
                        ignoredata = repo.dirstate._ignorefileandline(p)
                        break
        if not ignored:
            ui.write(_(b"%s is not ignored\n") % uipathfn(f))
            continue
        if ignored == nf:
            ui.write(_(b"%s is ignored\n") % uipathfn(f))
        else:
            ui.write(
                _(
                    b"%s is ignored because of "
                    b"containing directory %s\n"
                )
                % (uipathfn(f), ignored)
            )
        ignorefile, lineno, line = ignoredata
        ui.write(
            _(b"(ignore rule in %s, line %d: '%s')\n")
            % (ignorefile, lineno, line)
        )
1494 1494
1495 1495
@command(
    b'debugindex',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindex', file_, opts)

    # full hashes with --debug, abbreviated ones otherwise
    shortfn = hex if ui.debugflag else short

    # node column width: taken from the first revision, 12 by default
    idlen = 12
    for r in store:
        idlen = len(shortfn(store.node(r)))
        break

    fm = ui.formatter(b'debugindex', opts)
    fm.plain(
        b'   rev linkrev %s %s p2\n'
        % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
    )

    for r in store:
        node = store.node(r)
        p1, p2 = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', r)
        fm.write(b'linkrev', b'%7d ', store.linkrev(r))
        fm.write(b'node', b'%s ', shortfn(node))
        fm.write(b'p1', b'%s ', shortfn(p1))
        fm.write(b'p2', b'%s', shortfn(p2))
        fm.plain(b'\n')

    fm.end()
1535 1535
1536 1536
@command(
    b'debugindexdot',
    cmdutil.debugrevlogopts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
    ui.writenoi18n(b"digraph G {\n")
    for rev in store:
        # one edge per non-null parent, pointing parent -> child
        p1, p2 = store.parents(store.node(rev))
        ui.write(b"\t%d -> %d\n" % (store.rev(p1), rev))
        if p2 != nullid:
            ui.write(b"\t%d -> %d\n" % (store.rev(p2), rev))
    ui.write(b"}\n")
1555 1555
1556 1556
@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # touch the index via shortest() before reading stats
    # (presumably forces the native index to load -- verify)
    repo.changelog.shortest(nullid, 1)
    index = repo.changelog.index
    if not util.safehasattr(index, b'stats'):
        raise error.Abort(_(b'debugindexstats only works with native code'))
    for key, value in sorted(index.stats().items()):
        ui.write(b'%s: %d\n' % (key, value))
1566 1566
1567 1567
@command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
def debuginstall(ui, **opts):
    """test Mercurial installation

    Checks the encoding, the Python interpreter, compiled extension
    modules, compression engines, templates, the commit editor and the
    username configuration, counting every issue found.

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)

    # number of issues detected; doubles as the command's return value
    problems = 0

    fm = ui.formatter(b'debuginstall', opts)
    fm.startitem()

    # encoding might be unknown or wrong. don't translate these messages.
    fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(
        err,
        b'encodingerror',
        b" %s\n (check that your locale is properly set)\n",
        err,
    )

    # Python
    pythonlib = None
    if util.safehasattr(os, '__file__'):
        pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
    elif getattr(sys, 'oxidized', False):
        # frozen (PyOxidizer) builds: report the executable instead
        pythonlib = pycompat.sysexecutable

    fm.write(
        b'pythonexe',
        _(b"checking Python executable (%s)\n"),
        pycompat.sysexecutable or _(b"unknown"),
    )
    fm.write(
        b'pythonimplementation',
        _(b"checking Python implementation (%s)\n"),
        pycompat.sysbytes(platform.python_implementation()),
    )
    fm.write(
        b'pythonver',
        _(b"checking Python version (%s)\n"),
        (b"%d.%d.%d" % sys.version_info[:3]),
    )
    fm.write(
        b'pythonlib',
        _(b"checking Python lib (%s)...\n"),
        pythonlib or _(b"unknown"),
    )

    try:
        from . import rustext

        rustext.__doc__  # trigger lazy import
    except ImportError:
        rustext = None

    # TLS protocol / SNI support of this Python build
    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add(b'sni')

    fm.write(
        b'pythonsecurity',
        _(b"checking Python security support (%s)\n"),
        fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
    )

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if b'tls1.2' not in security:
        fm.plain(
            _(
                b'  TLS 1.2 not supported by Python install; '
                b'network connections lack modern security\n'
            )
        )
    if b'sni' not in security:
        fm.plain(
            _(
                b'  SNI not supported by Python install; may have '
                b'connectivity issues with some servers\n'
            )
        )

    fm.plain(
        _(
            b"checking Rust extensions (%s)\n"
            % (b'missing' if rustext is None else b'installed')
        ),
    )

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write(
        b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
    )
    fm.write(
        b'hgverextra',
        _(b"checking Mercurial custom build (%s)\n"),
        b'+'.join(hgver.split(b'+')[1:]),
    )

    # compiled modules
    hgmodules = None
    if util.safehasattr(sys.modules[__name__], '__file__'):
        hgmodules = os.path.dirname(pycompat.fsencode(__file__))
    elif getattr(sys, 'oxidized', False):
        hgmodules = pycompat.sysexecutable

    fm.write(
        b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
    )
    fm.write(
        b'hgmodules',
        _(b"checking installed modules (%s)...\n"),
        hgmodules or _(b"unknown"),
    )

    # which accelerated implementations the module policy implies
    rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
    rustext = rustandc  # for now, that's the only case
    cext = policy.policy in (b'c', b'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        err = None
        try:
            if cext:
                from .cext import (  # pytype: disable=import-error
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )

                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (  # pytype: disable=import-error
                    ancestor,
                    dirstate,
                )

                dir(ancestor), dir(dirstate)  # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, b'extensionserror', b" %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write(
        b'compengines',
        _(b'checking registered compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    fm.write(
        b'compenginesavail',
        _(b'checking available compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines if e.available()),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE
    )
    fm.write(
        b'compenginesserver',
        _(
            b'checking available compression engines '
            b'for wire protocol (%s)\n'
        ),
        fm.formatlist(
            [e.name() for e in wirecompengines if e.wireprotosupport()],
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    re2 = b'missing'
    if util._re2:
        re2 = b'available'
    fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatedir()
    fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
    fm.condwrite(not p, b'', _(b" no template directories found\n"))
    if p:
        (m, fp) = templater.try_open_template(b"map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
        else:
            p = None
        fm.condwrite(
            p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
        )
        fm.condwrite(
            not m,
            b'defaulttemplatenotfound',
            _(b" template '%s' not found\n"),
            b"default",
        )
    if not p:
        problems += 1
    fm.condwrite(
        not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
    )

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    fm.condwrite(
        not cmdpath and editor == b'vi',
        b'vinotfound',
        _(
            b" No commit editor set and can't find %s in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editor == b'vi' and editorbin,
    )
    fm.condwrite(
        not cmdpath and editor != b'vi',
        b'editornotfound',
        _(
            b" Can't find editor '%s' in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editorbin,
    )
    if not cmdpath and editor != b'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e.message
        problems += 1

    fm.condwrite(
        username, b'username', _(b"checking username (%s)\n"), username
    )
    fm.condwrite(
        err,
        b'usernameerror',
        _(
            b"checking username...\n %s\n"
            b" (specify a username in your configuration file)\n"
        ),
        err,
    )

    # let extensions contribute their own checks
    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, b'', _(b"no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(
        problems,
        b'problems',
        _(b"%d problems detected, please check your install!\n"),
        problems,
    )
    fm.end()

    return problems
1866 1866
1867 1867
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    # one boolean per queried id, in input order
    flags = peer.known([bin(s) for s in ids])
    ui.write(b"%s\n" % b"".join(b"1" if known else b"0" for known in flags))
1881 1881
1882 1882
@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # thin alias: the actual completion logic lives in debugnamecomplete
    debugnamecomplete(ui, repo, *args)
1887 1887
1888 1888
@command(
    b'debuglocks',
    [
        (b'L', b'force-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # --force-lock / --force-wlock: unconditionally remove the lock
    # file(s); no check is made that the owning process is gone, which
    # is why the option help flags this as DANGEROUS.  The store lock
    # lives in svfs, the working-state lock in vfs.
    if opts.get('force_lock'):
        repo.svfs.unlink(b'lock')
    if opts.get('force_wlock'):
        repo.vfs.unlink(b'wlock')
    if opts.get('force_lock') or opts.get('force_wlock'):
        return 0

    locks = []
    try:
        if opts.get('set_wlock'):
            try:
                # non-blocking acquire (wait=False) so we abort instead
                # of hanging when somebody else holds the lock
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            # hold the acquired lock(s) until the user answers the prompt
            ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # Print one status line for lock `name` and return 1 if it is
        # currently held by someone else, 0 if it is free.
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # we got the lock ourselves, so it was free; drop it again
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                # lock file content has the form "host:pid"
                locker = vfs.readlock(name)
                if b":" in locker:
                    host, pid = locker.split(b':')
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except OSError as e:
                # ENOENT means the lock file vanished: treat as free
                if e.errno != errno.ENOENT:
                    raise

        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    return held
2000 2000
2001 2001
@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        # Fetch the fulltext cache of the root manifest storage; abort
        # if the active revlog implementation does not expose one.
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _(
                b"Current revlog implementation doesn't appear to have a "
                b"manifest fulltext cache\n"
            )
            raise error.Abort(msg)

    if opts.get('clear'):
        # wipe both the in-memory and the persisted cache data
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
        return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(e, hint=b"Check your manifest node id")
                manifest.read()  # stores revision in cache too
        return

    # no action requested: display the cache contents
    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # Use cache.get to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )
2073 2073
2074 2074
@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
def debugmergestate(ui, repo, *args, **opts):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    if ui.verbose:
        ms = mergestatemod.mergestate(repo)

        # sort so that reasonable information is on top
        v1records = ms._readrecordsv1()
        v2records = ms._readrecordsv2()

        if not v1records and not v2records:
            pass
        elif not v2records:
            ui.writenoi18n(b'no version 2 merge state\n')
        elif ms._v1v2match(v1records, v2records):
            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
        else:
            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')

    opts = pycompat.byteskwargs(opts)
    if not opts[b'template']:
        # default template: commits involved, per-file state (with
        # hash/path details or rename info depending on record type),
        # then per-file and stray extras
        opts[b'template'] = (
            b'{if(commits, "", "no merge state found\n")}'
            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
            b'{files % "file: {path} (state \\"{state}\\")\n'
            b'{if(local_path, "'
            b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
            b' ancestor path: {ancestor_path} (node {ancestor_node})\n'
            b' other path: {other_path} (node {other_node})\n'
            b'")}'
            b'{if(rename_side, "'
            b' rename side: {rename_side}\n'
            b' renamed path: {renamed_path}\n'
            b'")}'
            b'{extras % " extra: {key} = {value}\n"}'
            b'"}'
            b'{extras % "extra: {file} ({key} = {value})\n"}'
        )

    ms = mergestatemod.mergestate.read(repo)

    fm = ui.formatter(b'debugmergestate', opts)
    fm.startitem()

    # the two merge parents, with optional labels
    fm_commits = fm.nested(b'commits')
    if ms.active():
        for name, node, label_index in (
            (b'local', ms.local, 0),
            (b'other', ms.other, 1),
        ):
            fm_commits.startitem()
            fm_commits.data(name=name)
            fm_commits.data(node=hex(node))
            if ms._labels and len(ms._labels) > label_index:
                fm_commits.data(label=ms._labels[label_index])
    fm_commits.end()

    # one entry per file tracked by the merge state; the meaning of the
    # state tuple fields depends on the record type (see branches below)
    fm_files = fm.nested(b'files')
    if ms.active():
        for f in ms:
            fm_files.startitem()
            fm_files.data(path=f)
            state = ms._state[f]
            fm_files.data(state=state[0])
            if state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED,
                mergestatemod.MERGE_RECORD_RESOLVED,
            ):
                fm_files.data(local_key=state[1])
                fm_files.data(local_path=state[2])
                fm_files.data(ancestor_path=state[3])
                fm_files.data(ancestor_node=state[4])
                fm_files.data(other_path=state[5])
                fm_files.data(other_node=state[6])
                fm_files.data(local_flags=state[7])
            elif state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
                mergestatemod.MERGE_RECORD_RESOLVED_PATH,
            ):
                fm_files.data(renamed_path=state[1])
                fm_files.data(rename_side=state[2])
            fm_extras = fm_files.nested(b'extras')
            for k, v in sorted(ms.extras(f).items()):
                fm_extras.startitem()
                fm_extras.data(key=k)
                fm_extras.data(value=v)
            fm_extras.end()

    fm_files.end()

    fm_extras = fm.nested(b'extras')
    for f, d in sorted(pycompat.iteritems(ms.allextras())):
        if f in ms:
            # If file is in mergestate, we have already processed its extras
            continue
        for k, v in pycompat.iteritems(d):
            fm_extras.startitem()
            fm_extras.data(file=f)
            fm_extras.data(key=k)
            fm_extras.data(value=v)
    fm_extras.end()

    fm.end()
2182 2182
2183 2183
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    candidates = set()
    # branches get special handling below so that only open branches are
    # offered as completions
    for nsname, ns in pycompat.iteritems(repo.names):
        if nsname == b'branches':
            continue
        candidates.update(ns.listnames(repo))
    for tag, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            candidates.add(tag)
    prefixes = args or [b'']
    matches = {
        name
        for prefix in prefixes
        for name in candidates
        if name.startswith(prefix)
    }
    ui.write(b'\n'.join(sorted(matches)))
    ui.write(b'\n')
2206 2206
2207 2207
@command(
    b'debugnodemap',
    [
        (
            b'',
            b'dump-new',
            False,
            # fixed: this option writes the data to stdout, not stdin
            _(b'write a (new) persistent binary nodemap on stdout'),
        ),
        (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
        (
            b'',
            b'check',
            False,
            _(b'check that the data on disk data are correct.'),
        ),
        (
            b'',
            b'metadata',
            False,
            _(b'display the on disk meta data for the nodemap'),
        ),
    ],
)
def debugnodemap(ui, repo, **opts):
    """write and inspect on disk nodemap"""
    if opts['dump_new']:
        # serialize a fresh nodemap from the changelog index and emit it
        unfi = repo.unfiltered()
        cl = unfi.changelog
        if util.safehasattr(cl.index, "nodemap_data_all"):
            data = cl.index.nodemap_data_all()
        else:
            data = nodemap.persistent_data(cl.index)
        ui.write(data)
    elif opts['dump_disk']:
        # emit the persisted nodemap bytes as-is (nothing if absent)
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write(data[:])
    elif opts['check']:
        # validate the persisted data against the live changelog index
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            return nodemap.check_data(ui, cl.index, data)
    elif opts['metadata']:
        # print the docket (header) fields of the persisted nodemap
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write((b"uid: %s\n") % docket.uid)
            ui.write((b"tip-rev: %d\n") % docket.tip_rev)
            ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
            ui.write((b"data-length: %d\n") % docket.data_length)
            ui.write((b"data-unused: %d\n") % docket.data_unused)
            unused_perc = docket.data_unused * 100.0 / docket.data_length
            ui.write((b"data-unused: %2.3f%%\n") % unused_perc)
2269 2269
2270 2270
@command(
    b'debugobsolete',
    [
        (b'', b'flags', 0, _(b'markers flag')),
        (
            b'',
            b'record-parents',
            False,
            _(b'record parent information for the precursor'),
        ),
        (b'r', b'rev', [], _(b'display markers relevant to REV')),
        (
            b'',
            b'exclusive',
            False,
            _(b'restrict display to markers only relevant to REV'),
        ),
        (b'', b'index', False, _(b'display index of the marker')),
        (b'', b'delete', [], _(b'delete markers specified by indices')),
    ]
    + cmdutil.commitopts2
    + cmdutil.formatteropts,
    _(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        # Parse a full-length hex node id; abort on anything else.
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.InputError(
                b'changeset references must be full hexadecimal '
                b'node identifiers'
            )

    # --delete mode: remove markers by numeric index
    if opts.get(b'delete'):
        indices = []
        for v in opts.get(b'delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.InputError(
                    _(b'invalid index value: %r') % v,
                    hint=_(b'use integers for indices'),
                )

        if repo.currenttransaction():
            raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of transaction.')
            )

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_(b'deleted %i obsolescence markers\n') % n)

        return

    # creation mode: a precursor (and optional successors) were given
    if precursor is not None:
        if opts[b'rev']:
            raise error.InputError(
                b'cannot select revision when creating marker'
            )
        # NOTE(review): local `metadata` shadows the module-level
        # `metadata` import; harmless here since the module is not used
        # in this function, but worth renaming eventually
        metadata = {}
        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction(b'debugobsolete')
            try:
                date = opts.get(b'date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts[b'record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort(
                            b'cannot used --record-parents on '
                            b'unknown changesets'
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts[b'flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % pycompat.bytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # display mode: list markers, optionally limited to --rev
        if opts[b'rev']:
            revs = scmutil.revrange(repo, opts[b'rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts[b'exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get(b'rev') and opts.get(b'index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get(b'index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
2420 2420
2421 2421
@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    # one "src -> dst" line per copy recorded against the first parent
    copymap = ctx.p1copies()
    for dest, source in copymap.items():
        ui.write(b'%s -> %s\n' % (source, dest))
2434 2434
2435 2435
@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""
    # Renamed from `debugp1copies`: the previous definition reused the
    # p1 function name, silently shadowing the module-level binding of
    # the real debugp1copies.  The registered command name (set by the
    # decorator) is unchanged.
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p2copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))
2448 2448
2449 2449
@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    """complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used."""

    def complete(path, acceptable):
        # Return (files, dirs) completions for `path`, restricted to
        # dirstate entries whose state character is in `acceptable`.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        # paths outside the repository cannot be completed
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        spec = spec[len(rootdir) :]
        # normalize to '/' separators for matching against dirstate keys
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in pycompat.iteritems(dirstate):
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                # without --full, stop at the next path separator and
                # offer the directory instead
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # build the set of acceptable dirstate state characters from the
    # filter options: n/m = normal, a = added, r = removed
    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        # no filter option given means accept every state
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')
2518 2518
2519 2519
@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    ctx1 = scmutil.revsingle(repo, rev1)
    ctx2 = scmutil.revsingle(repo, rev2)
    matcher = scmutil.match(ctx1, pats, opts)
    # copies.pathcopies maps destination -> source; print sorted by dst
    copymap = copies.pathcopies(ctx1, ctx2, matcher)
    for dest, source in sorted(copymap.items()):
        ui.write(b'%s -> %s\n' % (source, dest))
2533 2533
2534 2534
@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging. Requires --debug to display
    # though.
    overrides = {
        (b'devel', b'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        is_local = peer.local() is not None
        is_pushable = peer.canpush()

        ui.write(_(b'url: %s\n') % peer.url())
        ui.write(_(b'local: %s\n') % (_(b'yes') if is_local else _(b'no')))
        ui.write(
            _(b'pushable: %s\n') % (_(b'yes') if is_pushable else _(b'no'))
        )
2553 2553
2554 2554
@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts[b'tool']:
        # --tool maps onto ui.forcemerge, mirroring what merge does
        overrides[(b'ui', b'forcemerge')] = opts[b'tool']
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        hgmerge = encoding.environ.get(b"HGMERGE")
        if hgmerge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config(b"ui", b"merge")
        if uimerge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts[b'changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                # swallow _picktool's warnings unless --debug was given
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(b'%s = %s\n' % (path, tool))
2642 2642
2643 2643
@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    """access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    """

    target = hg.peer(ui, {}, repopath)
    if not keyinfo:
        # listing mode: dump every key/value pair of the namespace
        for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
            ui.write(
                b"%s\t%s\n" % (stringutil.escapestr(k), stringutil.escapestr(v))
            )
        return
    # update mode: compare-and-set old -> new on the remote
    key, old, new = keyinfo
    with target.commandexecutor() as e:
        r = e.callcommand(
            b'pushkey',
            {
                b'namespace': namespace,
                b'key': key,
                b'old': old,
                b'new': new,
            },
        ).result()

    ui.status(pycompat.bytestr(r) + b'\n')
    return not r
2675 2675
2676 2676
@command(b'debugpvec', [], _(b'A B'))
def debugpvec(ui, repo, a, b=None):
    """display and compare the pvecs of two revisions

    Prints each vector, the depths, the hamming distance, and the
    relation between the two: '=' (equal), '>' / '<', '|' (crossed),
    or '?' when none of those comparisons holds.
    """
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = b"="
    elif pa > pb:
        rel = b">"
    elif pa < pb:
        rel = b"<"
    elif pa | pb:
        rel = b"|"
    else:
        # previously `rel` was left unbound when no comparison matched,
        # raising NameError in the write below; report an unknown
        # relation instead of crashing
        rel = b"?"
    ui.write(_(b"a: %s\n") % pa)
    ui.write(_(b"b: %s\n") % pb)
    ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(
        _(b"delta: %d hdist: %d distance: %d relation: %s\n")
        % (
            abs(pa._depth - pb._depth),
            pvec._hamming(pa._vec, pb._vec),
            pa.distance(pb),
            rel,
        )
    )
2703 2703
2704 2704
@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            # files only in the manifest, plus dirstate-only files that
            # are not in the 'added' state ('a')
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
            changedfiles = manifestonly | dsnotadded

        # changedfiles=None means rebuild everything
        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2752 2752
2753 2753
@command(b'debugrebuildfncache', [], b'')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # all of the work is delegated to the repair module
    repair.rebuildfncache(ui, repo)
2758 2758
2759 2759
@command(
    b'debugrename',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    matcher = scmutil.match(ctx, pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        # filelog().renamed() yields (source path, source node) or False
        renamed = fctx.filelog().renamed(fctx.filenode())
        relpath = repo.pathto(path)
        if not renamed:
            ui.write(_(b"%s not renamed\n") % relpath)
        else:
            ui.write(
                _(b"%s renamed from %s:%s\n")
                % (relpath, renamed[0], hex(renamed[1]))
            )
2779 2779
2780 2780
@command(b'debugrequires|debugrequirements', [], b'')
def debugrequirements(ui, repo):
    """print the current repo requirements"""
    # one requirement per line, in sorted order
    for req in sorted(repo.requirements):
        ui.write(b"%s\n" % req)
2786 2786
2787 2787
@command(
    b'debugrevlog',
    cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog

    With -d/--dump, print one raw line of index data per revision instead
    of the aggregated statistics.
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)

    if opts.get(b"dump"):
        # raw dump mode: one line per revision, no summary statistics
        numrevs = len(r)
        ui.write(
            (
                b"# rev p1rev p2rev start end deltastart base p1 p2"
                b" rawsize totalsize compression heads chainlen\n"
            )
        )
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # -1 means the revision is stored as a full snapshot; use
                # the revision itself as its delta base for display
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # maintain the set of current heads: drop parents, add rev
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                # cumulative raw size over cumulative stored size so far
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write(
                b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                b"%11d %5d %8d\n"
                % (
                    rev,
                    p1,
                    p2,
                    r.start(rev),
                    r.end(rev),
                    r.start(dbase),
                    r.start(cbase),
                    r.start(p1),
                    r.start(p2),
                    rs,
                    ts,
                    compression,
                    len(heads),
                    clen,
                )
            )
        return 0

    # decode the revlog header: low 16 bits are the format version, the
    # high bits carry feature flags
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append(b'inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append(b'generaldelta')
    if not flags:
        flags = [b'(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks the ways the "deltas" are built
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about delta chain of each rev
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision; each list is [min, max, total]
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        """fold ``size`` into the [min, max, total] accumulator ``l``"""
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # stored as a full snapshot (depth 0)
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            # stored as a delta; extend the base's chain bookkeeping
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        # (bytes attribute name is fine: this file imports getattr from
        # pycompat, which str-ifies attribute names)
        if util.safehasattr(r, b'_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            # first byte of the chunk identifies the compression engine
            chunktype = bytes(segment[0:1])
        else:
            chunktype = b'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # turn the running totals into averages for display
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    if numfull == 0:
        fullsize[2] = 0
    else:
        fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    # format-string templates: width is filled in by the helpers below
    basedfmtstr = b'%%%dd\n'
    basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        # integer format sized to the widest expected value
        return basedfmtstr % len(str(max))

    def pcfmtstr(max, padding=0):
        # "value (percent)" format sized to the widest expected value
        return basepcfmtstr % (len(str(max)), b' ' * padding)

    def pcfmt(value, total):
        # pair of (value, percentage-of-total) for the templates above
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.writenoi18n(b'format : %d\n' % format)
    ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))

    ui.write(b'\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
    ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
    ui.writenoi18n(
        b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
    )
    ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
    ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
    ui.writenoi18n(
        b' text : '
        + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b' delta : '
        + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b' lvl-%-3d : ' % depth)
            + fmt % pcfmt(numsnapdepth[depth], numrevs)
        )
    ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
    ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
    ui.writenoi18n(
        b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b' lvl-%-3d : ' % depth)
            + fmt % pcfmt(snaptotal[depth], totalsize)
        )
    ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        """render the one-byte chunk-type tag as a display label"""
        if chunktype == b'empty':
            return b' %s : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return b' 0x%s : ' % hex(chunktype)

    ui.write(b'\n')
    ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write(b'\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
    ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
    ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
    ui.writenoi18n(b'compression ratio : ' + fmt % compratio)

    if format > 0:
        ui.write(b'\n')
        ui.writenoi18n(
            b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
            % tuple(datasize)
        )
        ui.writenoi18n(
            b'full revision size (min/max/avg) : %d / %d / %d\n'
            % tuple(fullsize)
        )
        ui.writenoi18n(
            b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
            % tuple(semisize)
        )
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.writenoi18n(
                b' level-%-3d (min/max/avg) : %d / %d / %d\n'
                % ((depth,) + tuple(snapsizedepth[depth]))
            )
        ui.writenoi18n(
            b'delta size (min/max/avg) : %d / %d / %d\n'
            % tuple(deltasize)
        )

    if numdeltas > 0:
        ui.write(b'\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.writenoi18n(
            b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
        )
        if numprev > 0:
            ui.writenoi18n(
                b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
            )
            ui.writenoi18n(
                b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
            )
            ui.writenoi18n(
                b' other : ' + fmt2 % pcfmt(numoprev, numprev)
            )
        if gdelta:
            ui.writenoi18n(
                b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
            )
3142 3142
3143 3143
@command(
    b'debugrevlogindex',
    cmdutil.debugrevlogopts
    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
    _(b'[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index

    -f selects the index layout (0 or 1); --verbose adds offset/length
    columns, --debug prints full instead of shortened node ids.
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
    format = opts.get(b'format', 0)
    if format not in (0, 1):
        raise error.Abort(_(b"unknown format %d") % format)

    # --debug prints the full 40-char hash, otherwise the short form
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        # measure the first node to size the id columns
        idlen = len(shortfn(r.node(i)))
        break

    # header line, matching the column layout used in the loop below
    if format == 0:
        if ui.verbose:
            ui.writenoi18n(
                b" rev offset length linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
        else:
            ui.writenoi18n(
                b" rev linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
    elif format == 1:
        if ui.verbose:
            ui.writenoi18n(
                (
                    b" rev flag offset length size link p1"
                    b" p2 %s\n"
                )
                % b"nodeid".rjust(idlen)
            )
        else:
            ui.writenoi18n(
                b" rev flag size link p1 p2 %s\n"
                % b"nodeid".rjust(idlen)
            )

    for i in r:
        node = r.node(i)
        if format == 0:
            # format 0 reports parents as nodeids
            try:
                pp = r.parents(node)
            except Exception:
                # fall back to null parents if the lookup fails
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write(
                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
                    % (
                        i,
                        r.start(i),
                        r.length(i),
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
            else:
                ui.write(
                    b"% 6d % 7d %s %s %s\n"
                    % (
                        i,
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
        elif format == 1:
            # format 1 reports parents as revision numbers and adds flags
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write(
                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.start(i),
                        r.length(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
            else:
                ui.write(
                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
3257 3257
3258 3258
@command(
    b'debugrevspec',
    [
        (
            b'',
            b'optimize',
            None,
            _(b'print parsed tree after optimizing (DEPRECATED)'),
        ),
        (
            b'',
            b'show-revs',
            True,
            _(b'print list of result revisions (default)'),
        ),
        (
            b's',
            b'show-set',
            None,
            _(b'print internal representation of result set'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
        (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
        (b'', b'verify-optimized', False, _(b'verify optimized result')),
    ],
    b'REVSPEC',
)
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems(b'revsetalias')
    # the revset compilation pipeline, in order; each stage transforms the
    # tree produced by the previous one
    stages = [
        (b'parsed', lambda tree: tree),
        (
            b'expanded',
            lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
        ),
        (b'concatenated', revsetlang.foldconcat),
        (b'analyzed', revsetlang.analyze),
        (b'optimized', revsetlang.optimize),
    ]
    if opts[b'no_optimized']:
        stages = stages[:-1]
    if opts[b'verify_optimized'] and opts[b'no_optimized']:
        raise error.Abort(
            _(b'cannot use --verify-optimized with --no-optimized')
        )
    stagenames = {n for n, f in stages}

    # which stages to print: 'showalways' unconditionally, 'showchanged'
    # only when the stage actually modified the tree
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
        showchanged.update([b'expanded', b'concatenated'])
    if opts[b'optimize']:
        showalways.add(b'optimized')
    if opts[b'show_stage'] and opts[b'optimize']:
        raise error.Abort(_(b'cannot use --optimize with --show-stage'))
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    # run the pipeline, keeping every intermediate tree for --verify-optimized
    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(revsetlang.prettyformat(tree), b"\n")
            printedtree = tree

    if opts[b'verify_optimized']:
        # evaluate both the analyzed and the optimized trees and diff them
        arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
        brevs = revset.makematcher(treebystage[b'optimized'])(repo)
        if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
            ui.writenoi18n(
                b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
            )
            ui.writenoi18n(
                b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
            )
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        # results differ: print a unified-style diff of the two rev lists
        ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
        ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write(b'-%d\n' % c, label=b'diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write(b'+%d\n' % c, label=b'diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(b' %d\n' % c)
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
        ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
    if not opts[b'show_revs']:
        return
    for c in revs:
        ui.write(b"%d\n" % c)
3390 3390
3391 3391
@command(
    b'debugserve',
    [
        (
            b'',
            b'sshstdio',
            False,
            _(b'run an SSH server bound to process handles'),
        ),
        (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
        (b'', b'logiofile', b'', _(b'file to log server I/O to')),
    ],
    b'',
)
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts[b'sshstdio']:
        raise error.Abort(_(b'only --sshstdio is currently supported'))

    if opts[b'logiofd'] and opts[b'logiofile']:
        raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))

    logfh = None
    if opts[b'logiofd']:
        fd = int(opts[b'logiofd'])
        # Line buffering in binary mode is unsupported (and warns on
        # Python 3.8+), so run fully unbuffered instead; this is not
        # performance-critical code.
        try:
            logfh = os.fdopen(fd, 'ab', 0)
        except OSError as err:
            if err.errno != errno.ESPIPE:
                raise
            # a pipe cannot seek, which makes `ab` mode fail on py3
            logfh = os.fdopen(fd, 'wb', 0)
    elif opts[b'logiofile']:
        logfh = open(opts[b'logiofile'], b'ab', 0)

    server = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    server.serve_forever()
3440 3440
3441 3441
@command(b'debugsetparents', [], _(b'REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory (DANGEROUS)

    This command is not what you are looking for and should not be used. Using
    this command will most certainly result in slight corruption of the file
    level histories within your repository. DO NOT USE THIS COMMAND.

    The command updates the p1 and p2 fields in the dirstate, and does not
    touch anything else. This is useful for writing repository conversion
    tools, but should be used with extreme care. For example, neither the
    working directory nor the dirstate is updated, so file status may be
    incorrect after running this command. Only use it if you are one of the
    few people that deeply understand both conversion tools and file level
    histories. If you are reading this help, you are not one of these people
    (most of them sailed west from Mithlond anyway).

    So one last time DO NOT USE THIS COMMAND.

    Returns 0 on success.
    """

    node1 = scmutil.revsingle(repo, rev1).node()
    # rev2 defaults to the null revision when omitted
    node2 = scmutil.revsingle(repo, rev2, b'null').node()

    with repo.wlock():
        repo.setparents(node1, node2)
3469 3469
3470 3470
@command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugsidedata(ui, repo, file_, rev=None, **opts):
    """dump the side data for a cl/manifest/file revision

    Use --verbose to dump the sidedata content."""
    opts = pycompat.byteskwargs(opts)
    # With -c/-m/--dir the revlog is implied, so the sole positional
    # argument is the revision itself.
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            # report the right command name in the usage error (this was
            # copy-pasted from debugdata and said b'debugdata')
            raise error.CommandError(b'debugsidedata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugsidedata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugsidedata', file_, opts)
    # unwrap filelog-style wrappers down to the underlying revlog, which
    # is the object exposing sidedata()
    r = getattr(r, '_revlog', r)
    try:
        sidedata = r.sidedata(r.lookup(rev))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
    if sidedata:
        # deterministic output order for the entries
        sidedata = list(sidedata.items())
        sidedata.sort()
    ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
    for key, value in sidedata:
        ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
        if ui.verbose:
            ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3497 3497
3498 3498
@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    """test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    """
    if not pycompat.iswindows:
        raise error.Abort(
            _(b'certificate chain building is only possible on Windows')
        )

    if not source:
        if not repo:
            raise error.Abort(
                _(
                    b"there is no Mercurial repository here, and no "
                    b"server specified"
                )
            )
        # fall back to the repository's 'default' path
        source = b"default"

    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)

    # only these schemes make sense for a TLS probe; fill in the standard
    # port when the URL does not carry one
    defaultport = {b'https': 443, b'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_(b"malformed port number in URL"))
    else:
        raise error.Abort(_(b"only https and ssh connections are supported"))

    # imported locally: the win32 module is only importable on Windows
    from . import win32

    # NOTE(review): ssl.wrap_socket() is deprecated since Python 3.7 and
    # removed in 3.12; consider SSLContext().wrap_socket(). CERT_NONE is
    # deliberate here: we only fetch the peer certificate to inspect its
    # chain, we are not validating the connection.
    s = ssl.wrap_socket(
        socket.socket(),
        ssl_version=ssl.PROTOCOL_TLS,
        cert_reqs=ssl.CERT_NONE,
        ca_certs=None,
    )

    try:
        s.connect(addr)
        # DER-encoded peer certificate
        cert = s.getpeercert(True)

        ui.status(_(b'checking the certificate chain for %s\n') % url.host)

        # first pass: check only, do not trigger Windows Update
        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_(b'certificate chain is incomplete, updating... '))

            # second pass: allow the OS to fetch missing intermediates
            if not win32.checkcertificatechain(cert):
                ui.status(_(b'failed.\n'))
            else:
                ui.status(_(b'done.\n'))
        else:
            ui.status(_(b'full certificate chain is available\n'))
    finally:
        s.close()
3568 3568
3569 3569
@command(
    b"debugbackupbundle",
    [
        (
            b"",
            b"recover",
            b"",
            b"brings the specified changeset back into the repository",
        )
    ]
    + cmdutil.logopts,
    _(b"hg debugbackupbundle [--recover HASH]"),
)
def debugbackupbundle(ui, repo, *pats, **opts):
    """lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in each
    backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for that
    backup.
    """
    # every .hg bundle in .hg/strip-backup, newest first
    backups = list(
        filter(
            os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
        )
    )
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    opts = pycompat.byteskwargs(opts)
    # neutralize bundle/force before reusing the incoming machinery below
    opts[b"bundle"] = b""
    opts[b"force"] = None
    limit = logcmdutil.getlimit(opts)

    def display(other, chlist, displayer):
        # show up to `limit` changesets from chlist, honoring the
        # --newest-first and --no-merges log options
        if opts.get(b"newest_first"):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [True for p in other.changelog.parents(n) if p != nullid]
            if opts.get(b"no_merges") and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get(b"recover")
    if recovernode:
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_(b"%s already exists in the repo\n") % recovernode)
            return
    elif backups:
        msg = _(
            b"Recover changesets using: hg debugbackupbundle --recover "
            b"<changeset hash>\n\nAvailable backup changesets:"
        )
        ui.status(msg, label=b"status.removed")
    else:
        ui.status(_(b"no backup changesets found\n"))
        return

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = ui.expandpath(os.path.relpath(backup, encoding.getcwd()))
        source, branches = hg.parseurl(source, opts.get(b"branch"))
        try:
            other = hg.peer(repo, opts, source)
        except error.LookupError as ex:
            # the bundle references a parent we do not have; skip it
            msg = _(b"\nwarning: unable to open bundle %s") % source
            hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg, hint=hint)
            continue
        revs, checkout = hg.addbranchrevs(
            repo, other, branches, opts.get(b"rev")
        )

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        # silence the incoming computation, restoring verbosity afterwards
        quiet = ui.quiet
        try:
            ui.quiet = True
            other, chlist, cleanupfn = bundlerepo.getremotechanges(
                ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
            )
        except error.LookupError:
            continue
        finally:
            ui.quiet = quiet

        try:
            if not chlist:
                continue
            if recovernode:
                # apply the first bundle containing the requested node
                with repo.lock(), repo.transaction(b"unbundle") as tr:
                    if scmutil.isrevsymbol(other, recovernode):
                        ui.status(_(b"Unbundling %s\n") % (recovernode))
                        f = hg.openpath(ui, source)
                        gen = exchange.readbundle(ui, f, source)
                        if isinstance(gen, bundle2.unbundle20):
                            bundle2.applybundle(
                                repo,
                                gen,
                                tr,
                                source=b"unbundle",
                                url=b"bundle:" + source,
                            )
                        else:
                            gen.apply(repo, b"unbundle", b"bundle:" + source)
                        break
            else:
                # listing mode: header with the bundle's mtime, then the
                # changesets it contains
                backupdate = encoding.strtolocal(
                    time.strftime(
                        "%a %H:%M, %Y-%m-%d",
                        time.localtime(os.path.getmtime(source)),
                    )
                )
                ui.status(b"\n%s\n" % (backupdate.ljust(50)))
                if ui.verbose:
                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
                else:
                    # non-verbose: one-line template instead of full message
                    opts[
                        b"template"
                    ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
                displayer = logcmdutil.changesetdisplayer(
                    ui, other, opts, False
                )
                display(other, chlist, displayer)
                displayer.close()
        finally:
            cleanupfn()
3705 3705
3706 3706
@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    """dump the subrepository state of the given (or working) revision"""
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.writenoi18n(b'path %s\n' % path)
        ui.writenoi18n(b' source %s\n' % state[0])
        ui.writenoi18n(b' revision %s\n' % state[1])
3718 3718
3719 3719
@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # one shared cache dict threaded through every successorssets() call so
    # computation is reused across revisions
    cache = {}
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % bytes(ctx))
        for succsset in obsutil.successorssets(
            repo, ctx.node(), closest=opts['closest'], cache=cache
        ):
            # one indented line per successors set; empty sets print as a
            # bare newline
            for node in succsset:
                ui.write(b' %s' % short(node))
            ui.write(b'\n')
3774 3774
3775 3775
@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    # Read-only walk of the fnodes cache: computemissing=False means a
    # missing entry is reported rather than computed on the fly.
    fnodescache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    for rev in repo:
        node = repo[rev].node()
        fnode = fnodescache.getfnode(node, computemissing=False)
        if fnode:
            display = hex(fnode)
        else:
            display = b'missing/invalid'
        ui.write(b'%d %s %s\n' % (rev, hex(node), display))
3785 3785
3786 3786
@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    # The command is optionalrepo, but -r/--rev only makes sense with one.
    revs = None
    if opts['rev']:
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = scmutil.revrange(repo, opts['rev'])

    # Collect -D KEY=VALUE definitions into template properties.
    # 'ui' is rejected because it would shadow a reserved resource name.
    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split(b'=', 1))
            if not k or k == b'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_(b'malformed keyword definition: %s') % d)

    if ui.verbose:
        # Show the parsed tree and, if [templatealias] expansion changed
        # anything, the expanded tree as well.
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
            )

    if revs is None:
        # Generic template: render once against default resources.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested revision.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
3850 3850
3851 3851
@command(
    b'debuguigetpass',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    response = ui.getpass(prompt)
    # getpass() may yield None; substitute a marker so output stays defined.
    if response is None:
        response = b"<default response>"
    ui.writenoi18n(b'response: %s\n' % response)
3866 3866
3867 3867
@command(
    b'debuguiprompt',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    response = ui.prompt(prompt)
    ui.writenoi18n(b'response: %s\n' % response)
3880 3880
3881 3881
@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Hold both the working-copy lock and the store lock so that every
    # cache (including working-copy related ones) can be rebuilt safely.
    with repo.wlock(), repo.lock():
        repo.updatecaches(full=True)
3887 3887
3888 3888
@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
        (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlog will be upgraded. You can restrict this using flag
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlog but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    * `--filelogs`: optimize the filelogs only
    * `--no-changelog --no-manifest --no-filelogs`: skip all filelog optimisation
    """
    # The CLI hands us a list of optimization names (default []), but a
    # direct caller may pass None (the signature default). set(None) would
    # raise TypeError, so guard with `or ()` before normalizing to a set,
    # which is what upgrade.upgraderepo() expects.
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=set(optimize or ()), backup=backup, **opts
    )
3938 3938
3939 3939
@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    matcher = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(matcher), b'\n')
    matched = list(repo[None].walk(matcher))
    if not matched:
        return
    # Optionally normalize path separators for display.
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        normalize = util.normpath
    else:
        normalize = lambda fn: fn
    # Column widths are sized to the longest repo-relative and cwd-relative
    # names so the output lines up.
    fmt = b'f %%-%ds %%-%ds %%s' % (
        max(len(name) for name in matched),
        max(len(repo.pathto(name)) for name in matched),
    )
    for name in matched:
        exact = b'exact' if matcher.exact(name) else b''
        line = fmt % (name, normalize(repo.pathto(name)), exact)
        ui.write(b"%s\n" % line.rstrip())
3966 3966
3967 3967
@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    ctx = scmutil.revsingle(repo, rev)
    for entry in obsutil.whyunstable(repo, ctx):
        dnodes = b''
        if entry.get(b'divergentnodes'):
            # Render each divergent node as "<hex> (<phase>)", space-joined,
            # with a trailing space separating it from the reason.
            rendered = [
                b'%s (%s)' % (dctx.hex(), dctx.phasestr())
                for dctx in entry[b'divergentnodes']
            ]
            dnodes = b' '.join(rendered) + b' '
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )
3985 3985
3986 3986
@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    # Exercise the "debugwireargs" wire command against a remote peer.
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    # The generic remote options are for peer setup, not wire arguments.
    for remoteopt in cmdutil.remoteopts:
        del opts[remoteopt[1]]
    # Only forward options that were actually set on the command line.
    args = {k: v for k, v in pycompat.iteritems(opts) if v}
    args = pycompat.strkwargs(args)
    # run twice to check that we don't mess up the stream for the next command
    first = peer.debugwireargs(*vals, **args)
    second = peer.debugwireargs(*vals, **args)
    ui.write(b"%s\n" % first)
    if first != second:
        ui.warn(b"%s\n" % second)
4014 4014
4015 4015
4016 4016 def _parsewirelangblocks(fh):
4017 4017 activeaction = None
4018 4018 blocklines = []
4019 4019 lastindent = 0
4020 4020
4021 4021 for line in fh:
4022 4022 line = line.rstrip()
4023 4023 if not line:
4024 4024 continue
4025 4025
4026 4026 if line.startswith(b'#'):
4027 4027 continue
4028 4028
4029 4029 if not line.startswith(b' '):
4030 4030 # New block. Flush previous one.
4031 4031 if activeaction:
4032 4032 yield activeaction, blocklines
4033 4033
4034 4034 activeaction = line
4035 4035 blocklines = []
4036 4036 lastindent = 0
4037 4037 continue
4038 4038
4039 4039 # Else we start with an indent.
4040 4040
4041 4041 if not activeaction:
4042 4042 raise error.Abort(_(b'indented line outside of block'))
4043 4043
4044 4044 indent = len(line) - len(line.lstrip())
4045 4045
4046 4046 # If this line is indented more than the last line, concatenate it.
4047 4047 if indent > lastindent and blocklines:
4048 4048 blocklines[-1] += line.lstrip()
4049 4049 else:
4050 4050 blocklines.append(line)
4051 4051 lastindent = indent
4052 4052
4053 4053 # Flush last block.
4054 4054 if activeaction:
4055 4055 yield activeaction, blocklines
4056 4056
4057 4057
@command(
    b'debugwireproto',
    [
        (b'', b'localssh', False, _(b'start an SSH server for this repo')),
        (b'', b'peer', b'', _(b'construct a specific version of the peer')),
        (
            b'',
            b'noreadstderr',
            False,
            _(b'do not read from stderr of the remote'),
        ),
        (
            b'',
            b'nologhandshake',
            False,
            _(b'do not log I/O related to the peer handshake'),
        ),
    ]
    + cmdutil.remoteopts,
    _(b'[PATH]'),
    optionalrepo=True,
)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
    raw data payloads and don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is specified via stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.

    raw+
    ----

    Behaves like ``raw`` except flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

        command listkeys
            namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` N bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` N bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit a *Unified Frame-Based Protocol* by using special
    syntax.

    A frame is composed as a type, flags, and payload. These can be parsed
    from a string of the form:

       <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
    """
    opts = pycompat.byteskwargs(opts)

    if opts[b'localssh'] and not repo:
        raise error.Abort(_(b'--localssh requires a repository'))

    if opts[b'peer'] and opts[b'peer'] not in (
        b'raw',
        b'http2',
        b'ssh1',
        b'ssh2',
    ):
        raise error.Abort(
            _(b'invalid value for --peer'),
            hint=_(b'valid values are "raw", "ssh1", and "ssh2"'),
        )

    if path and opts[b'localssh']:
        raise error.Abort(_(b'cannot specify --localssh with an explicit path'))

    if ui.interactive():
        ui.write(_(b'(waiting for commands on stdin)\n'))

    blocks = list(_parsewirelangblocks(ui.fin))

    proc = None
    stdin = None
    stdout = None
    stderr = None
    opener = None

    if opts[b'localssh']:
        # We start the SSH server in its own process so there is process
        # separation. This prevents a whole class of potential bugs around
        # shared state from interfering with server operation.
        args = procutil.hgcmd() + [
            b'-R',
            repo.root,
            b'debugserve',
            b'--sshstdio',
        ]
        proc = subprocess.Popen(
            pycompat.rapply(procutil.tonativestr, args),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            bufsize=0,
        )

        stdin = proc.stdin
        stdout = proc.stdout
        stderr = proc.stderr

        # We turn the pipes into observers so we can log I/O.
        if ui.verbose or opts[b'peer'] == b'raw':
            stdin = util.makeloggingfileobject(
                ui, proc.stdin, b'i', logdata=True
            )
            stdout = util.makeloggingfileobject(
                ui, proc.stdout, b'o', logdata=True
            )
            stderr = util.makeloggingfileobject(
                ui, proc.stderr, b'e', logdata=True
            )

        # --localssh also implies the peer connection settings.

        url = b'ssh://localserver'
        autoreadstderr = not opts[b'noreadstderr']

        if opts[b'peer'] == b'ssh1':
            ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
            peer = sshpeer.sshv1peer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                None,
                autoreadstderr=autoreadstderr,
            )
        elif opts[b'peer'] == b'ssh2':
            ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
            peer = sshpeer.sshv2peer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                None,
                autoreadstderr=autoreadstderr,
            )
        elif opts[b'peer'] == b'raw':
            ui.write(_(b'using raw connection to peer\n'))
            peer = None
        else:
            ui.write(_(b'creating ssh peer from handshake results\n'))
            peer = sshpeer.makepeer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                autoreadstderr=autoreadstderr,
            )

    elif path:
        # We bypass hg.peer() so we can proxy the sockets.
        # TODO consider not doing this because we skip
        # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
        u = util.url(path)
        if u.scheme != b'http':
            raise error.Abort(_(b'only http:// paths are currently supported'))

        url, authinfo = u.authinfo()
        openerargs = {
            'useragent': b'Mercurial debugwireproto',
        }

        # Turn pipes/sockets into observers so we can log I/O.
        if ui.verbose:
            openerargs.update(
                {
                    'loggingfh': ui,
                    'loggingname': b's',
                    'loggingopts': {
                        'logdata': True,
                        'logdataapis': False,
                    },
                }
            )

        if ui.debugflag:
            openerargs['loggingopts']['logdataapis'] = True

        # Don't send default headers when in raw mode. This allows us to
        # bypass most of the behavior of our URL handling code so we can
        # have near complete control over what's sent on the wire.
        if opts[b'peer'] == b'raw':
            openerargs['sendaccept'] = False

        opener = urlmod.opener(ui, authinfo, **openerargs)

        if opts[b'peer'] == b'http2':
            ui.write(_(b'creating http peer for wire protocol version 2\n'))
            # We go through makepeer() because we need an API descriptor for
            # the peer instance to be useful.
            with ui.configoverride(
                {(b'experimental', b'httppeer.advertise-v2'): True}
            ):
                if opts[b'nologhandshake']:
                    ui.pushbuffer()

                peer = httppeer.makepeer(ui, path, opener=opener)

                if opts[b'nologhandshake']:
                    ui.popbuffer()

            if not isinstance(peer, httppeer.httpv2peer):
                raise error.Abort(
                    _(
                        b'could not instantiate HTTP peer for '
                        b'wire protocol version 2'
                    ),
                    hint=_(
                        b'the server may not have the feature '
                        b'enabled or is not allowing this '
                        b'client version'
                    ),
                )

        elif opts[b'peer'] == b'raw':
            ui.write(_(b'using raw connection to peer\n'))
            peer = None
        elif opts[b'peer']:
            raise error.Abort(
                _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
            )
        else:
            peer = httppeer.makepeer(ui, path, opener=opener)

        # We /could/ populate stdin/stdout with sock.makefile()...
    else:
        raise error.Abort(_(b'unsupported connection configuration'))

    batchedcommands = None

    # Now perform actions based on the parsed wire language instructions.
    for action, lines in blocks:
        if action in (b'raw', b'raw+'):
            if not stdin:
                raise error.Abort(_(b'cannot call raw/raw+ on this peer'))

            # Concatenate the data together.
            data = b''.join(l.lstrip() for l in lines)
            data = stringutil.unescapestr(data)
            stdin.write(data)

            if action == b'raw+':
                stdin.flush()
        elif action == b'flush':
            if not stdin:
                raise error.Abort(_(b'cannot call flush on this peer'))
            stdin.flush()
        elif action.startswith(b'command'):
            if not peer:
                raise error.Abort(
                    _(
                        b'cannot send commands unless peer instance '
                        b'is available'
                    )
                )

            command = action.split(b' ', 1)[1]

            args = {}
            for line in lines:
                # We need to allow empty values.
                fields = line.lstrip().split(b' ', 1)
                if len(fields) == 1:
                    key = fields[0]
                    value = b''
                else:
                    key, value = fields

                if value.startswith(b'eval:'):
                    value = stringutil.evalpythonliteral(value[5:])
                else:
                    value = stringutil.unescapestr(value)

                args[key] = value

            if batchedcommands is not None:
                batchedcommands.append((command, args))
                continue

            ui.status(_(b'sending %s command\n') % command)

            if b'PUSHFILE' in args:
                with open(args[b'PUSHFILE'], 'rb') as fh:
                    del args[b'PUSHFILE']
                    res, output = peer._callpush(
                        command, fh, **pycompat.strkwargs(args)
                    )
                    ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
                    ui.status(
                        _(b'remote output: %s\n') % stringutil.escapestr(output)
                    )
            else:
                with peer.commandexecutor() as e:
                    res = e.callcommand(command, args).result()

                if isinstance(res, wireprotov2peer.commandresponse):
                    val = res.objects()
                    ui.status(
                        _(b'response: %s\n')
                        % stringutil.pprint(val, bprefix=True, indent=2)
                    )
                else:
                    ui.status(
                        _(b'response: %s\n')
                        % stringutil.pprint(res, bprefix=True, indent=2)
                    )

        elif action == b'batchbegin':
            if batchedcommands is not None:
                raise error.Abort(_(b'nested batchbegin not allowed'))

            batchedcommands = []
        elif action == b'batchsubmit':
            # There is a batching API we could go through. But it would be
            # difficult to normalize requests into function calls. It is easier
            # to bypass this layer and normalize to commands + args.
            ui.status(
                _(b'sending batch with %d sub-commands\n')
                % len(batchedcommands)
            )
            assert peer is not None
            for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                ui.status(
                    _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
                )

            batchedcommands = None

        elif action.startswith(b'httprequest '):
            if not opener:
                raise error.Abort(
                    _(b'cannot use httprequest without an HTTP peer')
                )

            request = action.split(b' ', 2)
            if len(request) != 3:
                raise error.Abort(
                    _(
                        b'invalid httprequest: expected format is '
                        b'"httprequest <method> <path>"'
                    )
                )

            method, httppath = request[1:]
            headers = {}
            body = None
            frames = []
            for line in lines:
                line = line.lstrip()
                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                if m:
                    # Headers need to use native strings.
                    key = pycompat.strurl(m.group(1))
                    value = pycompat.strurl(m.group(2))
                    headers[key] = value
                    continue

                if line.startswith(b'BODYFILE '):
                    # split() returns a list; pass only the path component to
                    # open(). (Previously the whole list was passed, which
                    # raised TypeError whenever BODYFILE was used.)
                    with open(line.split(b' ', 1)[1], b'rb') as fh:
                        body = fh.read()
                elif line.startswith(b'frame '):
                    frame = wireprotoframing.makeframefromhumanstring(
                        line[len(b'frame ') :]
                    )

                    frames.append(frame)
                else:
                    raise error.Abort(
                        _(b'unknown argument to httprequest: %s') % line
                    )

            url = path + httppath

            if frames:
                body = b''.join(bytes(f) for f in frames)

            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)

            # urllib.Request insists on using has_data() as a proxy for
            # determining the request method. Override that to use our
            # explicitly requested method.
            req.get_method = lambda: pycompat.sysstr(method)

            try:
                res = opener.open(req)
                body = res.read()
            except util.urlerr.urlerror as e:
                # read() method must be called, but only exists in Python 2
                getattr(e, 'read', lambda: None)()
                continue

            ct = res.headers.get('Content-Type')
            if ct == 'application/mercurial-cbor':
                ui.write(
                    _(b'cbor> %s\n')
                    % stringutil.pprint(
                        cborutil.decodeall(body), bprefix=True, indent=2
                    )
                )

        elif action == b'close':
            assert peer is not None
            peer.close()
        elif action == b'readavailable':
            if not stdout or not stderr:
                raise error.Abort(
                    _(b'readavailable not available on this peer')
                )

            stdin.close()
            stdout.read()
            stderr.read()

        elif action == b'readline':
            if not stdout:
                raise error.Abort(_(b'readline not available on this peer'))
            stdout.readline()
        elif action == b'ereadline':
            if not stderr:
                raise error.Abort(_(b'ereadline not available on this peer'))
            stderr.readline()
        elif action.startswith(b'read '):
            count = int(action.split(b' ', 1)[1])
            if not stdout:
                raise error.Abort(_(b'read not available on this peer'))
            stdout.read(count)
        elif action.startswith(b'eread '):
            count = int(action.split(b' ', 1)[1])
            if not stderr:
                raise error.Abort(_(b'eread not available on this peer'))
            stderr.read(count)
        else:
            raise error.Abort(_(b'unknown action: %s') % action)

    if batchedcommands is not None:
        raise error.Abort(_(b'unclosed "batchbegin" request'))

    if peer:
        peer.close()

    if proc:
        proc.kill()
@@ -1,265 +1,251 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from . import (
12 12 error,
13 13 hg,
14 14 localrepo,
15 15 pycompat,
16 16 )
17 17
18 18 from .upgrade_utils import (
19 19 actions as upgrade_actions,
20 20 engine as upgrade_engine,
21 21 )
22 22
23 23 allformatvariant = upgrade_actions.allformatvariant
24 24
25 # search without '-' to support older form on newer client.
26 #
27 # We don't enforce backward compatibility for debug command so this
28 # might eventually be dropped. However, having to use two different
29 # forms in script when comparing result is anoying enough to add
30 # backward compatibility for a while.
31 legacy_opts_map = {
32 b'redeltaparent': b're-delta-parent',
33 b'redeltamultibase': b're-delta-multibase',
34 b'redeltaall': b're-delta-all',
35 b'redeltafulladd': b're-delta-fulladd',
36 }
37
38 25
39 26 def upgraderepo(
40 27 ui,
41 28 repo,
42 29 run=False,
43 30 optimize=None,
44 31 backup=True,
45 32 manifest=None,
46 33 changelog=None,
47 34 filelogs=None,
48 35 ):
49 36 """Upgrade a repository in place."""
50 37 if optimize is None:
51 optimize = []
52 optimize = {legacy_opts_map.get(o, o) for o in optimize}
38 optimize = {}
53 39 repo = repo.unfiltered()
54 40
55 41 revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
56 42 specentries = (
57 43 (upgrade_engine.UPGRADE_CHANGELOG, changelog),
58 44 (upgrade_engine.UPGRADE_MANIFEST, manifest),
59 45 (upgrade_engine.UPGRADE_FILELOGS, filelogs),
60 46 )
61 47 specified = [(y, x) for (y, x) in specentries if x is not None]
62 48 if specified:
63 49 # we have some limitation on revlogs to be recloned
64 50 if any(x for y, x in specified):
65 51 revlogs = set()
66 52 for upgrade, enabled in specified:
67 53 if enabled:
68 54 revlogs.add(upgrade)
69 55 else:
70 56 # none are enabled
71 57 for upgrade, __ in specified:
72 58 revlogs.discard(upgrade)
73 59
74 60 # Ensure the repository can be upgraded.
75 61 upgrade_actions.check_source_requirements(repo)
76 62
77 63 default_options = localrepo.defaultcreateopts(repo.ui)
78 64 newreqs = localrepo.newreporequirements(repo.ui, default_options)
79 65 newreqs.update(upgrade_actions.preservedrequirements(repo))
80 66
81 67 upgrade_actions.check_requirements_changes(repo, newreqs)
82 68
83 69 # Find and validate all improvements that can be made.
84 70 alloptimizations = upgrade_actions.findoptimizations(repo)
85 71
86 72 # Apply and Validate arguments.
87 73 optimizations = []
88 74 for o in alloptimizations:
89 75 if o.name in optimize:
90 76 optimizations.append(o)
91 77 optimize.discard(o.name)
92 78
93 79 if optimize: # anything left is unknown
94 80 raise error.Abort(
95 81 _(b'unknown optimization action requested: %s')
96 82 % b', '.join(sorted(optimize)),
97 83 hint=_(b'run without arguments to see valid optimizations'),
98 84 )
99 85
100 86 format_upgrades = upgrade_actions.find_format_upgrades(repo)
101 87 actions = upgrade_actions.determineactions(
102 88 repo, format_upgrades, repo.requirements, newreqs
103 89 )
104 90 actions.extend(
105 91 o
106 92 for o in sorted(optimizations)
107 93 # determineactions could have added optimisation
108 94 if o not in actions
109 95 )
110 96
111 97 removedreqs = repo.requirements - newreqs
112 98 addedreqs = newreqs - repo.requirements
113 99
114 100 if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
115 101 incompatible = upgrade_actions.RECLONES_REQUIREMENTS & (
116 102 removedreqs | addedreqs
117 103 )
118 104 if incompatible:
119 105 msg = _(
120 106 b'ignoring revlogs selection flags, format requirements '
121 107 b'change: %s\n'
122 108 )
123 109 ui.warn(msg % b', '.join(sorted(incompatible)))
124 110 revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
125 111
126 112 upgrade_op = upgrade_actions.UpgradeOperation(
127 113 ui,
128 114 newreqs,
129 115 repo.requirements,
130 116 actions,
131 117 revlogs,
132 118 )
133 119
134 120 if not run:
135 121 fromconfig = []
136 122 onlydefault = []
137 123
138 124 for d in format_upgrades:
139 125 if d.fromconfig(repo):
140 126 fromconfig.append(d)
141 127 elif d.default:
142 128 onlydefault.append(d)
143 129
144 130 if fromconfig or onlydefault:
145 131
146 132 if fromconfig:
147 133 ui.status(
148 134 _(
149 135 b'repository lacks features recommended by '
150 136 b'current config options:\n\n'
151 137 )
152 138 )
153 139 for i in fromconfig:
154 140 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
155 141
156 142 if onlydefault:
157 143 ui.status(
158 144 _(
159 145 b'repository lacks features used by the default '
160 146 b'config options:\n\n'
161 147 )
162 148 )
163 149 for i in onlydefault:
164 150 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
165 151
166 152 ui.status(b'\n')
167 153 else:
168 154 ui.status(_(b'(no format upgrades found in existing repository)\n'))
169 155
170 156 ui.status(
171 157 _(
172 158 b'performing an upgrade with "--run" will make the following '
173 159 b'changes:\n\n'
174 160 )
175 161 )
176 162
177 163 upgrade_op.print_requirements()
178 164 upgrade_op.print_optimisations()
179 165 upgrade_op.print_upgrade_actions()
180 166 upgrade_op.print_affected_revlogs()
181 167
182 168 if upgrade_op.unused_optimizations:
183 169 ui.status(
184 170 _(
185 171 b'additional optimizations are available by specifying '
186 172 b'"--optimize <name>":\n\n'
187 173 )
188 174 )
189 175 upgrade_op.print_unused_optimizations()
190 176 return
191 177
192 178 # Else we're in the run=true case.
193 179 ui.write(_(b'upgrade will perform the following actions:\n\n'))
194 180 upgrade_op.print_requirements()
195 181 upgrade_op.print_optimisations()
196 182 upgrade_op.print_upgrade_actions()
197 183 upgrade_op.print_affected_revlogs()
198 184
199 185 ui.status(_(b'beginning upgrade...\n'))
200 186 with repo.wlock(), repo.lock():
201 187 ui.status(_(b'repository locked and read-only\n'))
202 188 # Our strategy for upgrading the repository is to create a new,
203 189 # temporary repository, write data to it, then do a swap of the
204 190 # data. There are less heavyweight ways to do this, but it is easier
205 191 # to create a new repo object than to instantiate all the components
206 192 # (like the store) separately.
207 193 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
208 194 backuppath = None
209 195 try:
210 196 ui.status(
211 197 _(
212 198 b'creating temporary repository to stage migrated '
213 199 b'data: %s\n'
214 200 )
215 201 % tmppath
216 202 )
217 203
218 204 # clone ui without using ui.copy because repo.ui is protected
219 205 repoui = repo.ui.__class__(repo.ui)
220 206 dstrepo = hg.repository(repoui, path=tmppath, create=True)
221 207
222 208 with dstrepo.wlock(), dstrepo.lock():
223 209 backuppath = upgrade_engine.upgrade(
224 210 ui, repo, dstrepo, upgrade_op
225 211 )
226 212 if not (backup or backuppath is None):
227 213 ui.status(
228 214 _(b'removing old repository content %s\n') % backuppath
229 215 )
230 216 repo.vfs.rmtree(backuppath, forcibly=True)
231 217 backuppath = None
232 218
233 219 finally:
234 220 ui.status(_(b'removing temporary repository %s\n') % tmppath)
235 221 repo.vfs.rmtree(tmppath, forcibly=True)
236 222
237 223 if backuppath and not ui.quiet:
238 224 ui.warn(
239 225 _(b'copy of old repository backed up at %s\n') % backuppath
240 226 )
241 227 ui.warn(
242 228 _(
243 229 b'the old repository will not be deleted; remove '
244 230 b'it to free up disk space once the upgraded '
245 231 b'repository is verified\n'
246 232 )
247 233 )
248 234
249 235 if upgrade_actions.sharesafe.name in addedreqs:
250 236 ui.warn(
251 237 _(
252 238 b'repository upgraded to share safe mode, existing'
253 239 b' shares will still work in old non-safe mode. '
254 240 b'Re-share existing shares to use them in safe mode'
255 241 b' New shares will be created in safe mode.\n'
256 242 )
257 243 )
258 244 if upgrade_actions.sharesafe.name in removedreqs:
259 245 ui.warn(
260 246 _(
261 247 b'repository downgraded to not use share safe mode, '
262 248 b'existing shares will not work and needs to'
263 249 b' be reshared.\n'
264 250 )
265 251 )
@@ -1,1646 +1,1646 b''
1 1 #require no-reposimplestore
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [extensions]
5 5 > share =
6 6 > EOF
7 7
8 8 store and revlogv1 are required in source
9 9
10 10 $ hg --config format.usestore=false init no-store
11 11 $ hg -R no-store debugupgraderepo
12 12 abort: cannot upgrade repository; requirement missing: store
13 13 [255]
14 14
15 15 $ hg init no-revlogv1
16 16 $ cat > no-revlogv1/.hg/requires << EOF
17 17 > dotencode
18 18 > fncache
19 19 > generaldelta
20 20 > store
21 21 > EOF
22 22
23 23 $ hg -R no-revlogv1 debugupgraderepo
24 24 abort: cannot upgrade repository; requirement missing: revlogv1
25 25 [255]
26 26
27 27 Cannot upgrade shared repositories
28 28
29 29 $ hg init share-parent
30 30 $ hg -q share share-parent share-child
31 31
32 32 $ hg -R share-child debugupgraderepo
33 33 abort: cannot upgrade repository; unsupported source requirement: shared
34 34 [255]
35 35
36 36 Do not yet support upgrading treemanifest repos
37 37
38 38 $ hg --config experimental.treemanifest=true init treemanifest
39 39 $ hg -R treemanifest debugupgraderepo
40 40 abort: cannot upgrade repository; unsupported source requirement: treemanifest
41 41 [255]
42 42
43 43 Cannot add treemanifest requirement during upgrade
44 44
45 45 $ hg init disallowaddedreq
46 46 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
47 47 abort: cannot upgrade repository; do not support adding requirement: treemanifest
48 48 [255]
49 49
50 50 An upgrade of a repository created with recommended settings only suggests optimizations
51 51
52 52 $ hg init empty
53 53 $ cd empty
54 54 $ hg debugformat
55 55 format-variant repo
56 56 fncache: yes
57 57 dotencode: yes
58 58 generaldelta: yes
59 59 exp-sharesafe: no
60 60 sparserevlog: yes
61 61 sidedata: no
62 62 persistent-nodemap: no
63 63 copies-sdc: no
64 64 plain-cl-delta: yes
65 65 compression: zlib
66 66 compression-level: default
67 67 $ hg debugformat --verbose
68 68 format-variant repo config default
69 69 fncache: yes yes yes
70 70 dotencode: yes yes yes
71 71 generaldelta: yes yes yes
72 72 exp-sharesafe: no no no
73 73 sparserevlog: yes yes yes
74 74 sidedata: no no no
75 75 persistent-nodemap: no no no
76 76 copies-sdc: no no no
77 77 plain-cl-delta: yes yes yes
78 78 compression: zlib zlib zlib
79 79 compression-level: default default default
80 80 $ hg debugformat --verbose --config format.usefncache=no
81 81 format-variant repo config default
82 82 fncache: yes no yes
83 83 dotencode: yes no yes
84 84 generaldelta: yes yes yes
85 85 exp-sharesafe: no no no
86 86 sparserevlog: yes yes yes
87 87 sidedata: no no no
88 88 persistent-nodemap: no no no
89 89 copies-sdc: no no no
90 90 plain-cl-delta: yes yes yes
91 91 compression: zlib zlib zlib
92 92 compression-level: default default default
93 93 $ hg debugformat --verbose --config format.usefncache=no --color=debug
94 94 format-variant repo config default
95 95 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
96 96 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
97 97 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
98 98 [formatvariant.name.uptodate|exp-sharesafe: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
99 99 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
100 100 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
101 101 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
102 102 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
103 103 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
104 104 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
105 105 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
106 106 $ hg debugformat -Tjson
107 107 [
108 108 {
109 109 "config": true,
110 110 "default": true,
111 111 "name": "fncache",
112 112 "repo": true
113 113 },
114 114 {
115 115 "config": true,
116 116 "default": true,
117 117 "name": "dotencode",
118 118 "repo": true
119 119 },
120 120 {
121 121 "config": true,
122 122 "default": true,
123 123 "name": "generaldelta",
124 124 "repo": true
125 125 },
126 126 {
127 127 "config": false,
128 128 "default": false,
129 129 "name": "exp-sharesafe",
130 130 "repo": false
131 131 },
132 132 {
133 133 "config": true,
134 134 "default": true,
135 135 "name": "sparserevlog",
136 136 "repo": true
137 137 },
138 138 {
139 139 "config": false,
140 140 "default": false,
141 141 "name": "sidedata",
142 142 "repo": false
143 143 },
144 144 {
145 145 "config": false,
146 146 "default": false,
147 147 "name": "persistent-nodemap",
148 148 "repo": false
149 149 },
150 150 {
151 151 "config": false,
152 152 "default": false,
153 153 "name": "copies-sdc",
154 154 "repo": false
155 155 },
156 156 {
157 157 "config": true,
158 158 "default": true,
159 159 "name": "plain-cl-delta",
160 160 "repo": true
161 161 },
162 162 {
163 163 "config": "zlib",
164 164 "default": "zlib",
165 165 "name": "compression",
166 166 "repo": "zlib"
167 167 },
168 168 {
169 169 "config": "default",
170 170 "default": "default",
171 171 "name": "compression-level",
172 172 "repo": "default"
173 173 }
174 174 ]
175 175 $ hg debugupgraderepo
176 176 (no format upgrades found in existing repository)
177 177 performing an upgrade with "--run" will make the following changes:
178 178
179 179 requirements
180 180 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
181 181
182 182 processed revlogs:
183 183 - all-filelogs
184 184 - changelog
185 185 - manifest
186 186
187 187 additional optimizations are available by specifying "--optimize <name>":
188 188
189 189 re-delta-parent
190 190 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
191 191
192 192 re-delta-multibase
193 193 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
194 194
195 195 re-delta-all
196 196 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
197 197
198 198 re-delta-fulladd
199 199 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
200 200
201 201
202 202 $ hg debugupgraderepo --quiet
203 203 requirements
204 204 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
205 205
206 206 processed revlogs:
207 207 - all-filelogs
208 208 - changelog
209 209 - manifest
210 210
211 211
212 212 --optimize can be used to add optimizations
213 213
214 $ hg debugupgrade --optimize redeltaparent
214 $ hg debugupgrade --optimize 're-delta-parent'
215 215 (no format upgrades found in existing repository)
216 216 performing an upgrade with "--run" will make the following changes:
217 217
218 218 requirements
219 219 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
220 220
221 221 optimisations: re-delta-parent
222 222
223 223 re-delta-parent
224 224 deltas within internal storage will choose a new base revision if needed
225 225
226 226 processed revlogs:
227 227 - all-filelogs
228 228 - changelog
229 229 - manifest
230 230
231 231 additional optimizations are available by specifying "--optimize <name>":
232 232
233 233 re-delta-multibase
234 234 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
235 235
236 236 re-delta-all
237 237 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
238 238
239 239 re-delta-fulladd
240 240 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
241 241
242 242
243 243 modern form of the option
244 244
245 245 $ hg debugupgrade --optimize re-delta-parent
246 246 (no format upgrades found in existing repository)
247 247 performing an upgrade with "--run" will make the following changes:
248 248
249 249 requirements
250 250 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
251 251
252 252 optimisations: re-delta-parent
253 253
254 254 re-delta-parent
255 255 deltas within internal storage will choose a new base revision if needed
256 256
257 257 processed revlogs:
258 258 - all-filelogs
259 259 - changelog
260 260 - manifest
261 261
262 262 additional optimizations are available by specifying "--optimize <name>":
263 263
264 264 re-delta-multibase
265 265 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
266 266
267 267 re-delta-all
268 268 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
269 269
270 270 re-delta-fulladd
271 271 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
272 272
273 273 $ hg debugupgrade --optimize re-delta-parent --quiet
274 274 requirements
275 275 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
276 276
277 277 optimisations: re-delta-parent
278 278
279 279 processed revlogs:
280 280 - all-filelogs
281 281 - changelog
282 282 - manifest
283 283
284 284
285 285 unknown optimization:
286 286
287 287 $ hg debugupgrade --optimize foobar
288 288 abort: unknown optimization action requested: foobar
289 289 (run without arguments to see valid optimizations)
290 290 [255]
291 291
292 292 Various sub-optimal detections work
293 293
294 294 $ cat > .hg/requires << EOF
295 295 > revlogv1
296 296 > store
297 297 > EOF
298 298
299 299 $ hg debugformat
300 300 format-variant repo
301 301 fncache: no
302 302 dotencode: no
303 303 generaldelta: no
304 304 exp-sharesafe: no
305 305 sparserevlog: no
306 306 sidedata: no
307 307 persistent-nodemap: no
308 308 copies-sdc: no
309 309 plain-cl-delta: yes
310 310 compression: zlib
311 311 compression-level: default
312 312 $ hg debugformat --verbose
313 313 format-variant repo config default
314 314 fncache: no yes yes
315 315 dotencode: no yes yes
316 316 generaldelta: no yes yes
317 317 exp-sharesafe: no no no
318 318 sparserevlog: no yes yes
319 319 sidedata: no no no
320 320 persistent-nodemap: no no no
321 321 copies-sdc: no no no
322 322 plain-cl-delta: yes yes yes
323 323 compression: zlib zlib zlib
324 324 compression-level: default default default
325 325 $ hg debugformat --verbose --config format.usegeneraldelta=no
326 326 format-variant repo config default
327 327 fncache: no yes yes
328 328 dotencode: no yes yes
329 329 generaldelta: no no yes
330 330 exp-sharesafe: no no no
331 331 sparserevlog: no no yes
332 332 sidedata: no no no
333 333 persistent-nodemap: no no no
334 334 copies-sdc: no no no
335 335 plain-cl-delta: yes yes yes
336 336 compression: zlib zlib zlib
337 337 compression-level: default default default
338 338 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
339 339 format-variant repo config default
340 340 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
341 341 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
342 342 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
343 343 [formatvariant.name.uptodate|exp-sharesafe: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
344 344 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
345 345 [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
346 346 [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
347 347 [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
348 348 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
349 349 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
350 350 [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
351 351 $ hg debugupgraderepo
352 352 repository lacks features recommended by current config options:
353 353
354 354 fncache
355 355 long and reserved filenames may not work correctly; repository performance is sub-optimal
356 356
357 357 dotencode
358 358 storage of filenames beginning with a period or space may not work correctly
359 359
360 360 generaldelta
361 361 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
362 362
363 363 sparserevlog
364 364 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
365 365
366 366
367 367 performing an upgrade with "--run" will make the following changes:
368 368
369 369 requirements
370 370 preserved: revlogv1, store
371 371 added: dotencode, fncache, generaldelta, sparserevlog
372 372
373 373 fncache
374 374 repository will be more resilient to storing certain paths and performance of certain operations should be improved
375 375
376 376 dotencode
377 377 repository will be better able to store files beginning with a space or period
378 378
379 379 generaldelta
380 380 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
381 381
382 382 sparserevlog
383 383 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
384 384
385 385 processed revlogs:
386 386 - all-filelogs
387 387 - changelog
388 388 - manifest
389 389
390 390 additional optimizations are available by specifying "--optimize <name>":
391 391
392 392 re-delta-parent
393 393 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
394 394
395 395 re-delta-multibase
396 396 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
397 397
398 398 re-delta-all
399 399 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
400 400
401 401 re-delta-fulladd
402 402 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
403 403
404 404 $ hg debugupgraderepo --quiet
405 405 requirements
406 406 preserved: revlogv1, store
407 407 added: dotencode, fncache, generaldelta, sparserevlog
408 408
409 409 processed revlogs:
410 410 - all-filelogs
411 411 - changelog
412 412 - manifest
413 413
414 414
415 415 $ hg --config format.dotencode=false debugupgraderepo
416 416 repository lacks features recommended by current config options:
417 417
418 418 fncache
419 419 long and reserved filenames may not work correctly; repository performance is sub-optimal
420 420
421 421 generaldelta
422 422 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
423 423
424 424 sparserevlog
425 425 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
426 426
427 427 repository lacks features used by the default config options:
428 428
429 429 dotencode
430 430 storage of filenames beginning with a period or space may not work correctly
431 431
432 432
433 433 performing an upgrade with "--run" will make the following changes:
434 434
435 435 requirements
436 436 preserved: revlogv1, store
437 437 added: fncache, generaldelta, sparserevlog
438 438
439 439 fncache
440 440 repository will be more resilient to storing certain paths and performance of certain operations should be improved
441 441
442 442 generaldelta
443 443 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
444 444
445 445 sparserevlog
446 446 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
447 447
448 448 processed revlogs:
449 449 - all-filelogs
450 450 - changelog
451 451 - manifest
452 452
453 453 additional optimizations are available by specifying "--optimize <name>":
454 454
455 455 re-delta-parent
456 456 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
457 457
458 458 re-delta-multibase
459 459 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
460 460
461 461 re-delta-all
462 462 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
463 463
464 464 re-delta-fulladd
465 465 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
466 466
467 467
468 468 $ cd ..
469 469
470 470 Upgrading a repository that is already modern essentially no-ops
471 471
472 472 $ hg init modern
473 473 $ hg -R modern debugupgraderepo --run
474 474 upgrade will perform the following actions:
475 475
476 476 requirements
477 477 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
478 478
479 479 processed revlogs:
480 480 - all-filelogs
481 481 - changelog
482 482 - manifest
483 483
484 484 beginning upgrade...
485 485 repository locked and read-only
486 486 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
487 487 (it is safe to interrupt this process any time before data migration completes)
488 488 data fully migrated to temporary repository
489 489 marking source repository as being upgraded; clients will be unable to read from repository
490 490 starting in-place swap of repository data
491 491 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
492 492 replacing store...
493 493 store replacement complete; repository was inconsistent for *s (glob)
494 494 finalizing requirements file and making repository readable again
495 495 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
496 496 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
497 497 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
498 498
499 499 Upgrading a repository to generaldelta works
500 500
501 501 $ hg --config format.usegeneraldelta=false init upgradegd
502 502 $ cd upgradegd
503 503 $ touch f0
504 504 $ hg -q commit -A -m initial
505 505 $ mkdir FooBarDirectory.d
506 506 $ touch FooBarDirectory.d/f1
507 507 $ hg -q commit -A -m 'add f1'
508 508 $ hg -q up -r 0
509 509 >>> from __future__ import absolute_import, print_function
510 510 >>> import random
511 511 >>> random.seed(0) # have a reproducible content
512 512 >>> with open("f2", "wb") as f:
513 513 ... for i in range(100000):
514 514 ... f.write(b"%d\n" % random.randint(1000000000, 9999999999)) and None
515 515 $ hg -q commit -A -m 'add f2'
516 516
517 517 make sure we have a .d file
518 518
519 519 $ ls -d .hg/store/data/*
520 520 .hg/store/data/_foo_bar_directory.d.hg
521 521 .hg/store/data/f0.i
522 522 .hg/store/data/f2.d
523 523 .hg/store/data/f2.i
524 524
525 525 $ hg debugupgraderepo --run --config format.sparse-revlog=false
526 526 upgrade will perform the following actions:
527 527
528 528 requirements
529 529 preserved: dotencode, fncache, revlogv1, store
530 530 added: generaldelta
531 531
532 532 generaldelta
533 533 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
534 534
535 535 processed revlogs:
536 536 - all-filelogs
537 537 - changelog
538 538 - manifest
539 539
540 540 beginning upgrade...
541 541 repository locked and read-only
542 542 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
543 543 (it is safe to interrupt this process any time before data migration completes)
544 544 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
545 545 migrating 519 KB in store; 1.05 MB tracked data
546 546 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
547 547 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
548 548 migrating 1 manifests containing 3 revisions (384 bytes in store; 238 bytes tracked data)
549 549 finished migrating 3 manifest revisions across 1 manifests; change in size: -17 bytes
550 550 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
551 551 finished migrating 3 changelog revisions; change in size: 0 bytes
552 552 finished migrating 9 total revisions; total change in store size: -17 bytes
553 553 copying phaseroots
554 554 data fully migrated to temporary repository
555 555 marking source repository as being upgraded; clients will be unable to read from repository
556 556 starting in-place swap of repository data
557 557 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
558 558 replacing store...
559 559 store replacement complete; repository was inconsistent for *s (glob)
560 560 finalizing requirements file and making repository readable again
561 561 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
562 562 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
563 563 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
564 564
565 565 Original requirements backed up
566 566
567 567 $ cat .hg/upgradebackup.*/requires
568 568 dotencode
569 569 fncache
570 570 revlogv1
571 571 store
572 572
573 573 generaldelta added to original requirements file
574 574
575 575 $ cat .hg/requires
576 576 dotencode
577 577 fncache
578 578 generaldelta
579 579 revlogv1
580 580 store
581 581
582 582 store directory has files we expect
583 583
584 584 $ ls .hg/store
585 585 00changelog.i
586 586 00manifest.i
587 587 data
588 588 fncache
589 589 phaseroots
590 590 undo
591 591 undo.backupfiles
592 592 undo.phaseroots
593 593
594 594 manifest should be generaldelta
595 595
596 596 $ hg debugrevlog -m | grep flags
597 597 flags : inline, generaldelta
598 598
599 599 verify should be happy
600 600
601 601 $ hg verify
602 602 checking changesets
603 603 checking manifests
604 604 crosschecking files in changesets and manifests
605 605 checking files
606 606 checked 3 changesets with 3 changes to 3 files
607 607
608 608 old store should be backed up
609 609
610 610 $ ls -d .hg/upgradebackup.*/
611 611 .hg/upgradebackup.*/ (glob)
612 612 $ ls .hg/upgradebackup.*/store
613 613 00changelog.i
614 614 00manifest.i
615 615 data
616 616 fncache
617 617 phaseroots
618 618 undo
619 619 undo.backup.fncache
620 620 undo.backupfiles
621 621 undo.phaseroots
622 622
623 623 unless --no-backup is passed
624 624
625 625 $ rm -rf .hg/upgradebackup.*/
626 626 $ hg debugupgraderepo --run --no-backup
627 627 upgrade will perform the following actions:
628 628
629 629 requirements
630 630 preserved: dotencode, fncache, generaldelta, revlogv1, store
631 631 added: sparserevlog
632 632
633 633 sparserevlog
634 634 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
635 635
636 636 processed revlogs:
637 637 - all-filelogs
638 638 - changelog
639 639 - manifest
640 640
641 641 beginning upgrade...
642 642 repository locked and read-only
643 643 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
644 644 (it is safe to interrupt this process any time before data migration completes)
645 645 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
646 646 migrating 519 KB in store; 1.05 MB tracked data
647 647 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
648 648 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
649 649 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
650 650 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
651 651 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
652 652 finished migrating 3 changelog revisions; change in size: 0 bytes
653 653 finished migrating 9 total revisions; total change in store size: 0 bytes
654 654 copying phaseroots
655 655 data fully migrated to temporary repository
656 656 marking source repository as being upgraded; clients will be unable to read from repository
657 657 starting in-place swap of repository data
658 658 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
659 659 replacing store...
660 660 store replacement complete; repository was inconsistent for * (glob)
661 661 finalizing requirements file and making repository readable again
662 662 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
663 663 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
664 664 $ ls -1 .hg/ | grep upgradebackup
665 665 [1]
666 666
667 667 We can restrict optimization to specific revlogs:
668 668
669 669 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
670 670 upgrade will perform the following actions:
671 671
672 672 requirements
673 673 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
674 674
675 675 optimisations: re-delta-parent
676 676
677 677 re-delta-parent
678 678 deltas within internal storage will choose a new base revision if needed
679 679
680 680 processed revlogs:
681 681 - manifest
682 682
683 683 beginning upgrade...
684 684 repository locked and read-only
685 685 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
686 686 (it is safe to interrupt this process any time before data migration completes)
687 687 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
688 688 migrating 519 KB in store; 1.05 MB tracked data
689 689 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
690 690 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
691 691 blindly copying data/f0.i containing 1 revisions
692 692 blindly copying data/f2.i containing 1 revisions
693 693 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
694 694 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
695 695 cloning 3 revisions from 00manifest.i
696 696 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
697 697 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
698 698 blindly copying 00changelog.i containing 3 revisions
699 699 finished migrating 3 changelog revisions; change in size: 0 bytes
700 700 finished migrating 9 total revisions; total change in store size: 0 bytes
701 701 copying phaseroots
702 702 data fully migrated to temporary repository
703 703 marking source repository as being upgraded; clients will be unable to read from repository
704 704 starting in-place swap of repository data
705 705 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
706 706 replacing store...
707 707 store replacement complete; repository was inconsistent for *s (glob)
708 708 finalizing requirements file and making repository readable again
709 709 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
710 710 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
711 711
712 712 Check that the repo still works fine
713 713
714 714 $ hg log -G --stat
715 715 @ changeset: 2:76d4395f5413 (no-py3 !)
716 716 @ changeset: 2:fca376863211 (py3 !)
717 717 | tag: tip
718 718 | parent: 0:ba592bf28da2
719 719 | user: test
720 720 | date: Thu Jan 01 00:00:00 1970 +0000
721 721 | summary: add f2
722 722 |
723 723 | f2 | 100000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
724 724 | 1 files changed, 100000 insertions(+), 0 deletions(-)
725 725 |
726 726 | o changeset: 1:2029ce2354e2
727 727 |/ user: test
728 728 | date: Thu Jan 01 00:00:00 1970 +0000
729 729 | summary: add f1
730 730 |
731 731 |
732 732 o changeset: 0:ba592bf28da2
733 733 user: test
734 734 date: Thu Jan 01 00:00:00 1970 +0000
735 735 summary: initial
736 736
737 737
738 738
739 739 $ hg verify
740 740 checking changesets
741 741 checking manifests
742 742 crosschecking files in changesets and manifests
743 743 checking files
744 744 checked 3 changesets with 3 changes to 3 files
745 745
746 746 Check we can select negatively
747 747
748 748 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
749 749 upgrade will perform the following actions:
750 750
751 751 requirements
752 752 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
753 753
754 754 optimisations: re-delta-parent
755 755
756 756 re-delta-parent
757 757 deltas within internal storage will choose a new base revision if needed
758 758
759 759 processed revlogs:
760 760 - all-filelogs
761 761 - changelog
762 762
763 763 beginning upgrade...
764 764 repository locked and read-only
765 765 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
766 766 (it is safe to interrupt this process any time before data migration completes)
767 767 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
768 768 migrating 519 KB in store; 1.05 MB tracked data
769 769 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
770 770 cloning 1 revisions from data/FooBarDirectory.d/f1.i
771 771 cloning 1 revisions from data/f0.i
772 772 cloning 1 revisions from data/f2.i
773 773 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
774 774 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
775 775 blindly copying 00manifest.i containing 3 revisions
776 776 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
777 777 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
778 778 cloning 3 revisions from 00changelog.i
779 779 finished migrating 3 changelog revisions; change in size: 0 bytes
780 780 finished migrating 9 total revisions; total change in store size: 0 bytes
781 781 copying phaseroots
782 782 data fully migrated to temporary repository
783 783 marking source repository as being upgraded; clients will be unable to read from repository
784 784 starting in-place swap of repository data
785 785 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
786 786 replacing store...
787 787 store replacement complete; repository was inconsistent for *s (glob)
788 788 finalizing requirements file and making repository readable again
789 789 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
790 790 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
791 791 $ hg verify
792 792 checking changesets
793 793 checking manifests
794 794 crosschecking files in changesets and manifests
795 795 checking files
796 796 checked 3 changesets with 3 changes to 3 files
797 797
798 798 Check that we can select changelog only
799 799
800 800 $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
801 801 upgrade will perform the following actions:
802 802
803 803 requirements
804 804 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
805 805
806 806 optimisations: re-delta-parent
807 807
808 808 re-delta-parent
809 809 deltas within internal storage will choose a new base revision if needed
810 810
811 811 processed revlogs:
812 812 - changelog
813 813
814 814 beginning upgrade...
815 815 repository locked and read-only
816 816 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
817 817 (it is safe to interrupt this process any time before data migration completes)
818 818 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
819 819 migrating 519 KB in store; 1.05 MB tracked data
820 820 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
821 821 blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
822 822 blindly copying data/f0.i containing 1 revisions
823 823 blindly copying data/f2.i containing 1 revisions
824 824 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
825 825 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
826 826 blindly copying 00manifest.i containing 3 revisions
827 827 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
828 828 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
829 829 cloning 3 revisions from 00changelog.i
830 830 finished migrating 3 changelog revisions; change in size: 0 bytes
831 831 finished migrating 9 total revisions; total change in store size: 0 bytes
832 832 copying phaseroots
833 833 data fully migrated to temporary repository
834 834 marking source repository as being upgraded; clients will be unable to read from repository
835 835 starting in-place swap of repository data
836 836 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
837 837 replacing store...
838 838 store replacement complete; repository was inconsistent for *s (glob)
839 839 finalizing requirements file and making repository readable again
840 840 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
841 841 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
842 842 $ hg verify
843 843 checking changesets
844 844 checking manifests
845 845 crosschecking files in changesets and manifests
846 846 checking files
847 847 checked 3 changesets with 3 changes to 3 files
848 848
849 849 Check that we can select filelog only
850 850
851 851 $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
852 852 upgrade will perform the following actions:
853 853
854 854 requirements
855 855 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
856 856
857 857 optimisations: re-delta-parent
858 858
859 859 re-delta-parent
860 860 deltas within internal storage will choose a new base revision if needed
861 861
862 862 processed revlogs:
863 863 - all-filelogs
864 864
865 865 beginning upgrade...
866 866 repository locked and read-only
867 867 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
868 868 (it is safe to interrupt this process any time before data migration completes)
869 869 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
870 870 migrating 519 KB in store; 1.05 MB tracked data
871 871 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
872 872 cloning 1 revisions from data/FooBarDirectory.d/f1.i
873 873 cloning 1 revisions from data/f0.i
874 874 cloning 1 revisions from data/f2.i
875 875 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
876 876 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
877 877 blindly copying 00manifest.i containing 3 revisions
878 878 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
879 879 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
880 880 blindly copying 00changelog.i containing 3 revisions
881 881 finished migrating 3 changelog revisions; change in size: 0 bytes
882 882 finished migrating 9 total revisions; total change in store size: 0 bytes
883 883 copying phaseroots
884 884 data fully migrated to temporary repository
885 885 marking source repository as being upgraded; clients will be unable to read from repository
886 886 starting in-place swap of repository data
887 887 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
888 888 replacing store...
889 889 store replacement complete; repository was inconsistent for *s (glob)
890 890 finalizing requirements file and making repository readable again
891 891 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
892 892 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
893 893 $ hg verify
894 894 checking changesets
895 895 checking manifests
896 896 crosschecking files in changesets and manifests
897 897 checking files
898 898 checked 3 changesets with 3 changes to 3 files
899 899
900 900
901 901 Check you can't skip revlog clone during important format downgrade
902 902
903 903 $ echo "[format]" > .hg/hgrc
904 904 $ echo "sparse-revlog=no" >> .hg/hgrc
905 905 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
906 906 ignoring revlogs selection flags, format requirements change: sparserevlog
907 907 upgrade will perform the following actions:
908 908
909 909 requirements
910 910 preserved: dotencode, fncache, generaldelta, revlogv1, store
911 911 removed: sparserevlog
912 912
913 913 optimisations: re-delta-parent
914 914
915 915 re-delta-parent
916 916 deltas within internal storage will choose a new base revision if needed
917 917
918 918 processed revlogs:
919 919 - all-filelogs
920 920 - changelog
921 921 - manifest
922 922
923 923 beginning upgrade...
924 924 repository locked and read-only
925 925 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
926 926 (it is safe to interrupt this process any time before data migration completes)
927 927 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
928 928 migrating 519 KB in store; 1.05 MB tracked data
929 929 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
930 930 cloning 1 revisions from data/FooBarDirectory.d/f1.i
931 931 cloning 1 revisions from data/f0.i
932 932 cloning 1 revisions from data/f2.i
933 933 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
934 934 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
935 935 cloning 3 revisions from 00manifest.i
936 936 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
937 937 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
938 938 cloning 3 revisions from 00changelog.i
939 939 finished migrating 3 changelog revisions; change in size: 0 bytes
940 940 finished migrating 9 total revisions; total change in store size: 0 bytes
941 941 copying phaseroots
942 942 data fully migrated to temporary repository
943 943 marking source repository as being upgraded; clients will be unable to read from repository
944 944 starting in-place swap of repository data
945 945 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
946 946 replacing store...
947 947 store replacement complete; repository was inconsistent for *s (glob)
948 948 finalizing requirements file and making repository readable again
949 949 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
950 950 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
951 951 $ hg verify
952 952 checking changesets
953 953 checking manifests
954 954 crosschecking files in changesets and manifests
955 955 checking files
956 956 checked 3 changesets with 3 changes to 3 files
957 957
958 958 Check you can't skip revlog clone during important format upgrade
959 959
960 960 $ echo "sparse-revlog=yes" >> .hg/hgrc
961 961 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
962 962 ignoring revlogs selection flags, format requirements change: sparserevlog
963 963 upgrade will perform the following actions:
964 964
965 965 requirements
966 966 preserved: dotencode, fncache, generaldelta, revlogv1, store
967 967 added: sparserevlog
968 968
969 969 optimisations: re-delta-parent
970 970
971 971 sparserevlog
972 972 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
973 973
974 974 re-delta-parent
975 975 deltas within internal storage will choose a new base revision if needed
976 976
977 977 processed revlogs:
978 978 - all-filelogs
979 979 - changelog
980 980 - manifest
981 981
982 982 beginning upgrade...
983 983 repository locked and read-only
984 984 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
985 985 (it is safe to interrupt this process any time before data migration completes)
986 986 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
987 987 migrating 519 KB in store; 1.05 MB tracked data
988 988 migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
989 989 cloning 1 revisions from data/FooBarDirectory.d/f1.i
990 990 cloning 1 revisions from data/f0.i
991 991 cloning 1 revisions from data/f2.i
992 992 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
993 993 migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
994 994 cloning 3 revisions from 00manifest.i
995 995 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
996 996 migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
997 997 cloning 3 revisions from 00changelog.i
998 998 finished migrating 3 changelog revisions; change in size: 0 bytes
999 999 finished migrating 9 total revisions; total change in store size: 0 bytes
1000 1000 copying phaseroots
1001 1001 data fully migrated to temporary repository
1002 1002 marking source repository as being upgraded; clients will be unable to read from repository
1003 1003 starting in-place swap of repository data
1004 1004 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
1005 1005 replacing store...
1006 1006 store replacement complete; repository was inconsistent for *s (glob)
1007 1007 finalizing requirements file and making repository readable again
1008 1008 removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
1009 1009 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
1010 1010 $ hg verify
1011 1011 checking changesets
1012 1012 checking manifests
1013 1013 crosschecking files in changesets and manifests
1014 1014 checking files
1015 1015 checked 3 changesets with 3 changes to 3 files
1016 1016
1017 1017 $ cd ..
1018 1018
1019 1019 store files with special filenames aren't encoded during copy
1020 1020
1021 1021 $ hg init store-filenames
1022 1022 $ cd store-filenames
1023 1023 $ touch foo
1024 1024 $ hg -q commit -A -m initial
1025 1025 $ touch .hg/store/.XX_special_filename
1026 1026
1027 1027 $ hg debugupgraderepo --run
1028 1028 upgrade will perform the following actions:
1029 1029
1030 1030 requirements
1031 1031 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1032 1032
1033 1033 processed revlogs:
1034 1034 - all-filelogs
1035 1035 - changelog
1036 1036 - manifest
1037 1037
1038 1038 beginning upgrade...
1039 1039 repository locked and read-only
1040 1040 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1041 1041 (it is safe to interrupt this process any time before data migration completes)
1042 1042 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1043 1043 migrating 301 bytes in store; 107 bytes tracked data
1044 1044 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1045 1045 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1046 1046 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1047 1047 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1048 1048 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1049 1049 finished migrating 1 changelog revisions; change in size: 0 bytes
1050 1050 finished migrating 3 total revisions; total change in store size: 0 bytes
1051 1051 copying .XX_special_filename
1052 1052 copying phaseroots
1053 1053 data fully migrated to temporary repository
1054 1054 marking source repository as being upgraded; clients will be unable to read from repository
1055 1055 starting in-place swap of repository data
1056 1056 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1057 1057 replacing store...
1058 1058 store replacement complete; repository was inconsistent for *s (glob)
1059 1059 finalizing requirements file and making repository readable again
1060 1060 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1061 1061 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1062 1062 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1063 $ hg debugupgraderepo --run --optimize redeltafulladd
1063 $ hg debugupgraderepo --run --optimize 're-delta-fulladd'
1064 1064 upgrade will perform the following actions:
1065 1065
1066 1066 requirements
1067 1067 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1068 1068
1069 1069 optimisations: re-delta-fulladd
1070 1070
1071 1071 re-delta-fulladd
1072 1072 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
1073 1073
1074 1074 processed revlogs:
1075 1075 - all-filelogs
1076 1076 - changelog
1077 1077 - manifest
1078 1078
1079 1079 beginning upgrade...
1080 1080 repository locked and read-only
1081 1081 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1082 1082 (it is safe to interrupt this process any time before data migration completes)
1083 1083 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1084 1084 migrating 301 bytes in store; 107 bytes tracked data
1085 1085 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
1086 1086 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1087 1087 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
1088 1088 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1089 1089 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
1090 1090 finished migrating 1 changelog revisions; change in size: 0 bytes
1091 1091 finished migrating 3 total revisions; total change in store size: 0 bytes
1092 1092 copying .XX_special_filename
1093 1093 copying phaseroots
1094 1094 data fully migrated to temporary repository
1095 1095 marking source repository as being upgraded; clients will be unable to read from repository
1096 1096 starting in-place swap of repository data
1097 1097 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1098 1098 replacing store...
1099 1099 store replacement complete; repository was inconsistent for *s (glob)
1100 1100 finalizing requirements file and making repository readable again
1101 1101 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
1102 1102 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
1103 1103 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1104 1104
1105 1105 fncache is valid after upgrade
1106 1106
1107 1107 $ hg debugrebuildfncache
1108 1108 fncache already up to date
1109 1109
1110 1110 $ cd ..
1111 1111
1112 1112 Check upgrading a large file repository
1113 1113 ---------------------------------------
1114 1114
1115 1115 $ hg init largefilesrepo
1116 1116 $ cat << EOF >> largefilesrepo/.hg/hgrc
1117 1117 > [extensions]
1118 1118 > largefiles =
1119 1119 > EOF
1120 1120
1121 1121 $ cd largefilesrepo
1122 1122 $ touch foo
1123 1123 $ hg add --large foo
1124 1124 $ hg -q commit -m initial
1125 1125 $ cat .hg/requires
1126 1126 dotencode
1127 1127 fncache
1128 1128 generaldelta
1129 1129 largefiles
1130 1130 revlogv1
1131 1131 sparserevlog
1132 1132 store
1133 1133
1134 1134 $ hg debugupgraderepo --run
1135 1135 upgrade will perform the following actions:
1136 1136
1137 1137 requirements
1138 1138 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
1139 1139
1140 1140 processed revlogs:
1141 1141 - all-filelogs
1142 1142 - changelog
1143 1143 - manifest
1144 1144
1145 1145 beginning upgrade...
1146 1146 repository locked and read-only
1147 1147 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1148 1148 (it is safe to interrupt this process any time before data migration completes)
1149 1149 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
1150 1150 migrating 355 bytes in store; 160 bytes tracked data
1151 1151 migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
1152 1152 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
1153 1153 migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
1154 1154 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
1155 1155 migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
1156 1156 finished migrating 1 changelog revisions; change in size: 0 bytes
1157 1157 finished migrating 3 total revisions; total change in store size: 0 bytes
1158 1158 copying phaseroots
1159 1159 data fully migrated to temporary repository
1160 1160 marking source repository as being upgraded; clients will be unable to read from repository
1161 1161 starting in-place swap of repository data
1162 1162 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1163 1163 replacing store...
1164 1164 store replacement complete; repository was inconsistent for *s (glob)
1165 1165 finalizing requirements file and making repository readable again
1166 1166 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1167 1167 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1168 1168 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1169 1169 $ cat .hg/requires
1170 1170 dotencode
1171 1171 fncache
1172 1172 generaldelta
1173 1173 largefiles
1174 1174 revlogv1
1175 1175 sparserevlog
1176 1176 store
1177 1177
1178 1178 $ cat << EOF >> .hg/hgrc
1179 1179 > [extensions]
1180 1180 > lfs =
1181 1181 > [lfs]
1182 1182 > threshold = 10
1183 1183 > EOF
1184 1184 $ echo '123456789012345' > lfs.bin
1185 1185 $ hg ci -Am 'lfs.bin'
1186 1186 adding lfs.bin
1187 1187 $ grep lfs .hg/requires
1188 1188 lfs
1189 1189 $ find .hg/store/lfs -type f
1190 1190 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1191 1191
1192 1192 $ hg debugupgraderepo --run
1193 1193 upgrade will perform the following actions:
1194 1194
1195 1195 requirements
1196 1196 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
1197 1197
1198 1198 processed revlogs:
1199 1199 - all-filelogs
1200 1200 - changelog
1201 1201 - manifest
1202 1202
1203 1203 beginning upgrade...
1204 1204 repository locked and read-only
1205 1205 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1206 1206 (it is safe to interrupt this process any time before data migration completes)
1207 1207 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
1208 1208 migrating 801 bytes in store; 467 bytes tracked data
1209 1209 migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
1210 1210 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
1211 1211 migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
1212 1212 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
1213 1213 migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
1214 1214 finished migrating 2 changelog revisions; change in size: 0 bytes
1215 1215 finished migrating 6 total revisions; total change in store size: 0 bytes
1216 1216 copying phaseroots
1217 1217 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1218 1218 data fully migrated to temporary repository
1219 1219 marking source repository as being upgraded; clients will be unable to read from repository
1220 1220 starting in-place swap of repository data
1221 1221 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1222 1222 replacing store...
1223 1223 store replacement complete; repository was inconsistent for *s (glob)
1224 1224 finalizing requirements file and making repository readable again
1225 1225 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
1226 1226 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
1227 1227 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1228 1228
1229 1229 $ grep lfs .hg/requires
1230 1230 lfs
1231 1231 $ find .hg/store/lfs -type f
1232 1232 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1233 1233 $ hg verify
1234 1234 checking changesets
1235 1235 checking manifests
1236 1236 crosschecking files in changesets and manifests
1237 1237 checking files
1238 1238 checked 2 changesets with 2 changes to 2 files
1239 1239 $ hg debugdata lfs.bin 0
1240 1240 version https://git-lfs.github.com/spec/v1
1241 1241 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
1242 1242 size 16
1243 1243 x-is-binary 0
1244 1244
1245 1245 $ cd ..
1246 1246
1247 1247 repository config is taken into account
1248 1248 -----------------------------------------
1249 1249
1250 1250 $ cat << EOF >> $HGRCPATH
1251 1251 > [format]
1252 1252 > maxchainlen = 1
1253 1253 > EOF
1254 1254
1255 1255 $ hg init localconfig
1256 1256 $ cd localconfig
1257 1257 $ cat << EOF > file
1258 1258 > some content
1259 1259 > with some length
1260 1260 > to make sure we get a delta
1261 1261 > after changes
1262 1262 > very long
1263 1263 > very long
1264 1264 > very long
1265 1265 > very long
1266 1266 > very long
1267 1267 > very long
1268 1268 > very long
1269 1269 > very long
1270 1270 > very long
1271 1271 > very long
1272 1272 > very long
1273 1273 > EOF
1274 1274 $ hg -q commit -A -m A
1275 1275 $ echo "new line" >> file
1276 1276 $ hg -q commit -m B
1277 1277 $ echo "new line" >> file
1278 1278 $ hg -q commit -m C
1279 1279
1280 1280 $ cat << EOF >> .hg/hgrc
1281 1281 > [format]
1282 1282 > maxchainlen = 9001
1283 1283 > EOF
1284 1284 $ hg config format
1285 1285 format.maxchainlen=9001
1286 1286 $ hg debugdeltachain file
1287 1287 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1288 1288 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1289 1289 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1290 1290 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
1291 1291
1292 $ hg debugupgraderepo --run --optimize redeltaall
1292 $ hg debugupgraderepo --run --optimize 're-delta-all'
1293 1293 upgrade will perform the following actions:
1294 1294
1295 1295 requirements
1296 1296 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1297 1297
1298 1298 optimisations: re-delta-all
1299 1299
1300 1300 re-delta-all
1301 1301 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
1302 1302
1303 1303 processed revlogs:
1304 1304 - all-filelogs
1305 1305 - changelog
1306 1306 - manifest
1307 1307
1308 1308 beginning upgrade...
1309 1309 repository locked and read-only
1310 1310 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
1311 1311 (it is safe to interrupt this process any time before data migration completes)
1312 1312 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
1313 1313 migrating 1019 bytes in store; 882 bytes tracked data
1314 1314 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
1315 1315 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
1316 1316 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
1317 1317 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
1318 1318 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
1319 1319 finished migrating 3 changelog revisions; change in size: 0 bytes
1320 1320 finished migrating 9 total revisions; total change in store size: -9 bytes
1321 1321 copying phaseroots
1322 1322 data fully migrated to temporary repository
1323 1323 marking source repository as being upgraded; clients will be unable to read from repository
1324 1324 starting in-place swap of repository data
1325 1325 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1326 1326 replacing store...
1327 1327 store replacement complete; repository was inconsistent for *s (glob)
1328 1328 finalizing requirements file and making repository readable again
1329 1329 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
1330 1330 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
1331 1331 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
1332 1332 $ hg debugdeltachain file
1333 1333 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
1334 1334 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
1335 1335 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
1336 1336 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
1337 1337 $ cd ..
1338 1338
1339 1339 $ cat << EOF >> $HGRCPATH
1340 1340 > [format]
1341 1341 > maxchainlen = 9001
1342 1342 > EOF
1343 1343
1344 1344 Check upgrading a sparse-revlog repository
1345 1345 ---------------------------------------
1346 1346
1347 1347 $ hg init sparserevlogrepo --config format.sparse-revlog=no
1348 1348 $ cd sparserevlogrepo
1349 1349 $ touch foo
1350 1350 $ hg add foo
1351 1351 $ hg -q commit -m "foo"
1352 1352 $ cat .hg/requires
1353 1353 dotencode
1354 1354 fncache
1355 1355 generaldelta
1356 1356 revlogv1
1357 1357 store
1358 1358
1359 1359 Check that we can add the sparse-revlog format requirement
1360 1360 $ hg --config format.sparse-revlog=yes debugupgraderepo --run --quiet
1361 1361 upgrade will perform the following actions:
1362 1362
1363 1363 requirements
1364 1364 preserved: dotencode, fncache, generaldelta, revlogv1, store
1365 1365 added: sparserevlog
1366 1366
1367 1367 processed revlogs:
1368 1368 - all-filelogs
1369 1369 - changelog
1370 1370 - manifest
1371 1371
1372 1372 $ cat .hg/requires
1373 1373 dotencode
1374 1374 fncache
1375 1375 generaldelta
1376 1376 revlogv1
1377 1377 sparserevlog
1378 1378 store
1379 1379
1380 1380 Check that we can remove the sparse-revlog format requirement
1381 1381 $ hg --config format.sparse-revlog=no debugupgraderepo --run --quiet
1382 1382 upgrade will perform the following actions:
1383 1383
1384 1384 requirements
1385 1385 preserved: dotencode, fncache, generaldelta, revlogv1, store
1386 1386 removed: sparserevlog
1387 1387
1388 1388 processed revlogs:
1389 1389 - all-filelogs
1390 1390 - changelog
1391 1391 - manifest
1392 1392
1393 1393 $ cat .hg/requires
1394 1394 dotencode
1395 1395 fncache
1396 1396 generaldelta
1397 1397 revlogv1
1398 1398 store
1399 1399
1400 1400 #if zstd
1401 1401
1402 1402 Check upgrading to a zstd revlog
1403 1403 --------------------------------
1404 1404
1405 1405 upgrade
1406 1406
1407 1407 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup --quiet
1408 1408 upgrade will perform the following actions:
1409 1409
1410 1410 requirements
1411 1411 preserved: dotencode, fncache, generaldelta, revlogv1, store
1412 1412 added: revlog-compression-zstd, sparserevlog
1413 1413
1414 1414 processed revlogs:
1415 1415 - all-filelogs
1416 1416 - changelog
1417 1417 - manifest
1418 1418
1419 1419 $ hg debugformat -v
1420 1420 format-variant repo config default
1421 1421 fncache: yes yes yes
1422 1422 dotencode: yes yes yes
1423 1423 generaldelta: yes yes yes
1424 1424 exp-sharesafe: no no no
1425 1425 sparserevlog: yes yes yes
1426 1426 sidedata: no no no
1427 1427 persistent-nodemap: no no no
1428 1428 copies-sdc: no no no
1429 1429 plain-cl-delta: yes yes yes
1430 1430 compression: zstd zlib zlib
1431 1431 compression-level: default default default
1432 1432 $ cat .hg/requires
1433 1433 dotencode
1434 1434 fncache
1435 1435 generaldelta
1436 1436 revlog-compression-zstd
1437 1437 revlogv1
1438 1438 sparserevlog
1439 1439 store
1440 1440
1441 1441 downgrade
1442 1442
1443 1443 $ hg debugupgraderepo --run --no-backup --quiet
1444 1444 upgrade will perform the following actions:
1445 1445
1446 1446 requirements
1447 1447 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1448 1448 removed: revlog-compression-zstd
1449 1449
1450 1450 processed revlogs:
1451 1451 - all-filelogs
1452 1452 - changelog
1453 1453 - manifest
1454 1454
1455 1455 $ hg debugformat -v
1456 1456 format-variant repo config default
1457 1457 fncache: yes yes yes
1458 1458 dotencode: yes yes yes
1459 1459 generaldelta: yes yes yes
1460 1460 exp-sharesafe: no no no
1461 1461 sparserevlog: yes yes yes
1462 1462 sidedata: no no no
1463 1463 persistent-nodemap: no no no
1464 1464 copies-sdc: no no no
1465 1465 plain-cl-delta: yes yes yes
1466 1466 compression: zlib zlib zlib
1467 1467 compression-level: default default default
1468 1468 $ cat .hg/requires
1469 1469 dotencode
1470 1470 fncache
1471 1471 generaldelta
1472 1472 revlogv1
1473 1473 sparserevlog
1474 1474 store
1475 1475
1476 1476 upgrade from hgrc
1477 1477
1478 1478 $ cat >> .hg/hgrc << EOF
1479 1479 > [format]
1480 1480 > revlog-compression=zstd
1481 1481 > EOF
1482 1482 $ hg debugupgraderepo --run --no-backup --quiet
1483 1483 upgrade will perform the following actions:
1484 1484
1485 1485 requirements
1486 1486 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
1487 1487 added: revlog-compression-zstd
1488 1488
1489 1489 processed revlogs:
1490 1490 - all-filelogs
1491 1491 - changelog
1492 1492 - manifest
1493 1493
1494 1494 $ hg debugformat -v
1495 1495 format-variant repo config default
1496 1496 fncache: yes yes yes
1497 1497 dotencode: yes yes yes
1498 1498 generaldelta: yes yes yes
1499 1499 exp-sharesafe: no no no
1500 1500 sparserevlog: yes yes yes
1501 1501 sidedata: no no no
1502 1502 persistent-nodemap: no no no
1503 1503 copies-sdc: no no no
1504 1504 plain-cl-delta: yes yes yes
1505 1505 compression: zstd zstd zlib
1506 1506 compression-level: default default default
1507 1507 $ cat .hg/requires
1508 1508 dotencode
1509 1509 fncache
1510 1510 generaldelta
1511 1511 revlog-compression-zstd
1512 1512 revlogv1
1513 1513 sparserevlog
1514 1514 store
1515 1515
1516 1516 #endif
1517 1517
1518 1518 Check upgrading to a side-data revlog
1519 1519 -------------------------------------
1520 1520
1521 1521 upgrade
1522 1522
1523 1523 $ hg --config format.exp-use-side-data=yes debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" --quiet
1524 1524 upgrade will perform the following actions:
1525 1525
1526 1526 requirements
1527 1527 preserved: dotencode, fncache, generaldelta, revlogv1, store (no-zstd !)
1528 1528 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
1529 1529 added: exp-sidedata-flag (zstd !)
1530 1530 added: exp-sidedata-flag, sparserevlog (no-zstd !)
1531 1531
1532 1532 processed revlogs:
1533 1533 - all-filelogs
1534 1534 - changelog
1535 1535 - manifest
1536 1536
1537 1537 $ hg debugformat -v
1538 1538 format-variant repo config default
1539 1539 fncache: yes yes yes
1540 1540 dotencode: yes yes yes
1541 1541 generaldelta: yes yes yes
1542 1542 exp-sharesafe: no no no
1543 1543 sparserevlog: yes yes yes
1544 1544 sidedata: yes no no
1545 1545 persistent-nodemap: no no no
1546 1546 copies-sdc: no no no
1547 1547 plain-cl-delta: yes yes yes
1548 1548 compression: zlib zlib zlib (no-zstd !)
1549 1549 compression: zstd zstd zlib (zstd !)
1550 1550 compression-level: default default default
1551 1551 $ cat .hg/requires
1552 1552 dotencode
1553 1553 exp-sidedata-flag
1554 1554 fncache
1555 1555 generaldelta
1556 1556 revlog-compression-zstd (zstd !)
1557 1557 revlogv1
1558 1558 sparserevlog
1559 1559 store
1560 1560 $ hg debugsidedata -c 0
1561 1561 2 sidedata entries
1562 1562 entry-0001 size 4
1563 1563 entry-0002 size 32
1564 1564
1565 1565 downgrade
1566 1566
1567 1567 $ hg debugupgraderepo --config format.exp-use-side-data=no --run --no-backup --quiet
1568 1568 upgrade will perform the following actions:
1569 1569
1570 1570 requirements
1571 1571 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
1572 1572 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
1573 1573 removed: exp-sidedata-flag
1574 1574
1575 1575 processed revlogs:
1576 1576 - all-filelogs
1577 1577 - changelog
1578 1578 - manifest
1579 1579
1580 1580 $ hg debugformat -v
1581 1581 format-variant repo config default
1582 1582 fncache: yes yes yes
1583 1583 dotencode: yes yes yes
1584 1584 generaldelta: yes yes yes
1585 1585 exp-sharesafe: no no no
1586 1586 sparserevlog: yes yes yes
1587 1587 sidedata: no no no
1588 1588 persistent-nodemap: no no no
1589 1589 copies-sdc: no no no
1590 1590 plain-cl-delta: yes yes yes
1591 1591 compression: zlib zlib zlib (no-zstd !)
1592 1592 compression: zstd zstd zlib (zstd !)
1593 1593 compression-level: default default default
1594 1594 $ cat .hg/requires
1595 1595 dotencode
1596 1596 fncache
1597 1597 generaldelta
1598 1598 revlog-compression-zstd (zstd !)
1599 1599 revlogv1
1600 1600 sparserevlog
1601 1601 store
1602 1602 $ hg debugsidedata -c 0
1603 1603
1604 1604 upgrade from hgrc
1605 1605
1606 1606 $ cat >> .hg/hgrc << EOF
1607 1607 > [format]
1608 1608 > exp-use-side-data=yes
1609 1609 > EOF
1610 1610 $ hg debugupgraderepo --run --no-backup --quiet
1611 1611 upgrade will perform the following actions:
1612 1612
1613 1613 requirements
1614 1614 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
1615 1615 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
1616 1616 added: exp-sidedata-flag
1617 1617
1618 1618 processed revlogs:
1619 1619 - all-filelogs
1620 1620 - changelog
1621 1621 - manifest
1622 1622
1623 1623 $ hg debugformat -v
1624 1624 format-variant repo config default
1625 1625 fncache: yes yes yes
1626 1626 dotencode: yes yes yes
1627 1627 generaldelta: yes yes yes
1628 1628 exp-sharesafe: no no no
1629 1629 sparserevlog: yes yes yes
1630 1630 sidedata: yes yes no
1631 1631 persistent-nodemap: no no no
1632 1632 copies-sdc: no no no
1633 1633 plain-cl-delta: yes yes yes
1634 1634 compression: zlib zlib zlib (no-zstd !)
1635 1635 compression: zstd zstd zlib (zstd !)
1636 1636 compression-level: default default default
1637 1637 $ cat .hg/requires
1638 1638 dotencode
1639 1639 exp-sidedata-flag
1640 1640 fncache
1641 1641 generaldelta
1642 1642 revlog-compression-zstd (zstd !)
1643 1643 revlogv1
1644 1644 sparserevlog
1645 1645 store
1646 1646 $ hg debugsidedata -c 0
General Comments 0
You need to be logged in to leave comments. Login now