##// END OF EJS Templates
# upgrade: add an argument to control manifest upgrade...
# marmoute -
# r43098:cf2b765c default
# parent child Browse files
# Show More
# @@ -1,3489 +1,3496 b''
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import codecs
11 11 import collections
12 12 import difflib
13 13 import errno
14 14 import operator
15 15 import os
16 16 import random
17 17 import re
18 18 import socket
19 19 import ssl
20 20 import stat
21 21 import string
22 22 import subprocess
23 23 import sys
24 24 import time
25 25
26 26 from .i18n import _
27 27 from .node import (
28 28 bin,
29 29 hex,
30 30 nullhex,
31 31 nullid,
32 32 nullrev,
33 33 short,
34 34 )
35 35 from . import (
36 36 bundle2,
37 37 changegroup,
38 38 cmdutil,
39 39 color,
40 40 context,
41 41 copies,
42 42 dagparser,
43 43 encoding,
44 44 error,
45 45 exchange,
46 46 extensions,
47 47 filemerge,
48 48 filesetlang,
49 49 formatter,
50 50 hg,
51 51 httppeer,
52 52 localrepo,
53 53 lock as lockmod,
54 54 logcmdutil,
55 55 merge as mergemod,
56 56 obsolete,
57 57 obsutil,
58 58 phases,
59 59 policy,
60 60 pvec,
61 61 pycompat,
62 62 registrar,
63 63 repair,
64 64 revlog,
65 65 revset,
66 66 revsetlang,
67 67 scmutil,
68 68 setdiscovery,
69 69 simplemerge,
70 70 sshpeer,
71 71 sslutil,
72 72 streamclone,
73 73 templater,
74 74 treediscovery,
75 75 upgrade,
76 76 url as urlmod,
77 77 util,
78 78 vfs as vfsmod,
79 79 wireprotoframing,
80 80 wireprotoserver,
81 81 wireprotov2peer,
82 82 )
83 83 from .utils import (
84 84 cborutil,
85 85 compression,
86 86 dateutil,
87 87 procutil,
88 88 stringutil,
89 89 )
90 90
91 91 from .revlogutils import (
92 92 deltas as deltautil
93 93 )
94 94
# Convenience alias: debug commands release lock bundles via lockmod.release.
release = lockmod.release

# Decorator/table used to register every debug* command defined below.
command = registrar.command()

@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    nargs = len(args)
    if nargs == 3:
        # an explicit index file was given: open it as a standalone revlog
        index, rev1, rev2 = args
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = rlog.lookup
    elif nargs == 2:
        # no index file: fall back to the changelog of the local repository
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        rlog = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    ancestor = rlog.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (rlog.rev(ancestor), hex(ancestor)))
117 117
@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    # open the path (honoring Mercurial's URL handling) and unbundle into repo
    fh = hg.openpath(ui, fname)
    bundle = exchange.readbundle(ui, fh, fname)
    bundle.apply(repo)
124 124
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
          otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    # building starts from scratch: refuse to run on a non-empty repo
    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # determine number of revs in DAG
    # (first parse pass, only used to size the progress bar)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = ['%d' % i
                              for i in pycompat.xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []
    progress = ui.makeprogress(_('building'), unit=_('revisions'),
                               total=total)
    # second parse pass: actually create one commit per 'n' event
    with progress, repo.wlock(), repo.lock(), repo.transaction("builddag"):
        at = -1
        atbranch = 'default'
        nodeids = []
        id = 0
        progress.update(id)
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # merge: three-way merge the shared file's content
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    # stamp this revision's line so every rev changes the file
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = "of"
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id

                if new_file:
                    fn = "nf%i" % id
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id
                    if len(ps) > 1:
                        # merges carry over the second parent's nf* files
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(repo, cx, path,
                                                  filecontent[path])
                    return None

                # translate backref parent ids into previously created nodes
                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                ui.note(('branch %s\n' % data))
                atbranch = data
            progress.update(id)

    # tags are written in one go, outside the transaction
    if tags:
        repo.vfs.write("localtags", "".join(tags))
272 272
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """dump the contents of changegroup 'gen' to the ui

    With 'all' set, every delta of every chunk group is listed; otherwise
    only the changeset nodes are printed. 'indent' prefixes every output
    line (used when nested inside a bundle2 listing).
    """
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            # print one labeled section, one line per delta
            ui.write("\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write("%s%s %s %s %s %s %d\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))

        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        # filelog groups follow until an empty header terminates the stream
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        chunkdata = gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write("%s%s\n" % (indent_string, hex(node)))
301 301
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = ' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # report an unknown marker format instead of aborting the dump
        msg = "%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = "%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        # render each raw marker through the standard obsmarker formatter
        fm = ui.formatter('debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()
324 324
def _debugphaseheads(ui, data, indent=0):
    """decode phase heads from 'data' and print one "node phasename" line each"""
    pad = ' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for node in headsbyphase[phase]:
            ui.write(pad)
            ui.write('%s %s\n' % (hex(node), phases.phasenames[phase]))
333 333
def _quasirepr(thing):
    """repr-like rendering of 'thing' that is stable for dict-like objects

    Mappings are rendered with their keys sorted so the output is
    deterministic; everything else falls back to repr().
    """
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        pairs = (b'%s: %s' % (k, thing[k]) for k in sorted(thing))
        return '{%s}' % b', '.join(pairs)
    return pycompat.bytestr(repr(thing))
339 339
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
    # optional --part-type filter: only show the named part types
    parttypes = opts.get(r'part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = '%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        # known part payloads get a detailed, indented sub-listing
        if part.type == 'changegroup':
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == 'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == 'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)
362 362
@command('debugbundle',
    [('a', 'all', None, _('show all details')),
    ('', 'part-type', [], _('show only the named part type')),
    ('', 'spec', None, _('print the bundlespec of the bundle'))],
    _('FILE'),
    norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as fh:
        if spec:
            # only report the bundlespec; do not inspect the payload
            spec = exchange.getbundlespec(ui, fh)
            ui.write('%s\n' % spec)
            return

        gen = exchange.readbundle(ui, fh, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            # bundle2 payloads get the part-by-part listing
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
381 381
@command('debugcapabilities',
    [], _('PATH'),
    norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    allcaps = peer.capabilities()
    ui.write(('Main capabilities:\n'))
    for cap in sorted(allcaps):
        ui.write((' %s\n') % cap)
    # bundle2 capabilities, if the peer advertises any, get their own section
    b2caps = bundle2.bundle2caps(peer)
    if not b2caps:
        return
    ui.write(('Bundle2 capabilities:\n'))
    for capname, capvalues in sorted(b2caps.iteritems()):
        ui.write((' %s\n') % capname)
        for value in capvalues:
            ui.write((' %s\n') % value)
400 400
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate

    Cross-checks every dirstate entry against the manifests of the working
    directory's parents and warns about each inconsistency found; aborts
    at the end if any were detected.
    """
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        # normal/removed entries must exist in the first parent's manifest
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        # added entries must NOT already be tracked in the first parent
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        # merged entries must come from at least one of the two parents
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        # everything tracked in manifest1 must be known to the dirstate
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # BUGFIX: the original bound the message to a local named 'error',
        # shadowing the imported 'error' module, so 'error.Abort' would
        # raise AttributeError on a string instead of aborting cleanly.
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
428 428
@command('debugcolor',
    [('', 'style', None, _('show all configured styles'))],
    'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.write(('color mode: %s\n') % stringutil.pprint(ui._colormode))
    # --style lists configured styles; the default lists raw colors
    if opts.get(r'style'):
        return _debugdisplaystyle(ui)
    return _debugdisplaycolor(ui)
439 439
def _debugdisplaycolor(ui):
    """write every known color name, each rendered with its own label"""
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        # with terminfo active, also pick up color./terminfo. config keys
        for key, _val in ui.configitems('color'):
            if key.startswith('color.'):
                ui._styles[key] = key[6:]
            elif key.startswith('terminfo.'):
                ui._styles[key] = key[9:]
    ui.write(_('available colors:\n'))
    # names containing '_' sort after the others so the '_background'
    # variants stay grouped together
    def sortkey(item):
        return ('_' in item[0], item[0], item[1])
    for colorname, label in sorted(ui._styles.items(), key=sortkey):
        ui.write(('%s\n') % colorname, label=label)
457 457
def _debugdisplaystyle(ui):
    """list each configured style together with its rendered effects"""
    ui.write(_('available style:\n'))
    if not ui._styles:
        return
    # pad labels so the effect columns line up
    width = max(len(name) for name in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            ui.write(': ')
            padding = max(0, width - len(label))
            ui.write(' ' * padding)
            rendered = (ui.label(e, e) for e in effects.split())
            ui.write(', '.join(rendered))
        ui.write('\n')
471 471
@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))

    requirements, chunks = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, chunks, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
489 489
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
    ('b', 'branches', None, _('annotate with branch names')),
    ('', 'dots', None, _('use dots for runs')),
    ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get(r'spaces')
    dots = opts.get(r'dots')
    if file_:
        # standalone index file: emit its DAG, labeling the listed revs
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            # yield ('n', (rev, parents)) for each rev, plus 'l' label events
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        cl = repo.changelog
        tags = opts.get(r'tags')
        branches = opts.get(r'branches')
        if tags:
            # map rev -> list of tag names, for 'l' (label) events below
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    # emit an 'a' (annotation) event on branch changes
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
        ui.write("\n")
552 552
@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    # with -c/-m/--dir the positional argument is the revision, not a file
    usesinternal = (opts.get('changelog') or opts.get('manifest')
                    or opts.get('dir'))
    if usesinternal:
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    storage = cmdutil.openstorage(repo, 'debugdata', file_, opts)
    try:
        ui.write(storage.rawdata(storage.lookup(rev)))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)
568 568
@command('debugdate',
    [('e', 'extended', None, _('try extended date formats'))],
    _('[-e] DATE [RANGE]'),
    norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # -e/--extended also tries the extra date formats accepted by Mercurial
    if opts[r"extended"]:
        parsed = dateutil.parsedate(date, util.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    ui.write(("internal: %d %d\n") % parsed)
    ui.write(("standard: %s\n") % dateutil.datestr(parsed))
    if range:
        matcher = dateutil.matchdate(range)
        ui.write(("match: %s\n") % matcher(parsed[0]))
584 584
@command('debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
    (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
    of this revision
    :``extradist``: total size of revisions not part of this delta chain from
    base of delta chain to end of this revision; a measurement
    of how much extra data we need to read/seek across to read
    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
    how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
    (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        # summarize one revision: sizes, delta role and its full delta chain
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # e[3] is the delta base; classify it relative to the parents
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            # without generaldelta a delta can only be against the previous rev
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    fm.plain(' rev chain# chainlen prev delta '
             'size rawsize chainsize ratio lindist extradist '
             'extraratio')
    if withsparseread:
        fm.plain(' readsize largestblk rddensity srchunks')
    fm.plain('\n')

    # delta chains are numbered in order of first appearance of their base
    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            # chain of length one: this rev is its own base
            prevrev = -1

        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)
        if withsparseread:
            # simulate a sparse read of the chain and measure its efficiency
            readsize = 0
            largestblock = 0
            srchunks = 0

            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write('readsize largestblock readdensity srchunks',
                     ' %10d %10d %9.5f %8d',
                     readsize, largestblock, readdensity, srchunks,
                     readsize=readsize, largestblock=largestblock,
                     readdensity=readdensity, srchunks=srchunks)

        fm.plain('\n')

    fm.end()
736 736
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime (DEPRECATED)')),
    ('', 'dates', True, _('display the saved mtime')),
    ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    # --nodates is the deprecated spelling of --no-dates
    nodates = not opts[r'dates']
    if opts.get(r'nodates') is not None:
        nodates = True
    datesort = opts.get(r'datesort')

    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    # dirstate entries are tuples: (state, mode, size, mtime)
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            timestr = 'unset '
        elif nodates:
            timestr = 'set '
        else:
            timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
            timestr = encoding.strtolocal(timestr)
        if ent[1] & 0o20000:
            # symlink bit set in the recorded mode
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
770 770
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
    ('', 'nonheads', None,
     _('use old-style discovery with non-heads included')),
    ('', 'rev', [], 'restrict discovery to this set of revs'),
    ('', 'seed', '12323', 'specify the random seed use for discovery'),
    ] + cmdutil.remoteopts,
    _('[--rev REV] [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(int(opts['seed']))



    if opts.get('old'):
        # legacy tree-walking discovery (pre-setdiscovery protocol)
        def doit(pushedrevs, remoteheads, remote=remote):
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))

            clnode = repo.changelog.node
            common = repo.revs('heads(::%ln)', common)
            common = {clnode(r) for r in common}
            return common, hds
    else:
        # modern sampling-based discovery, optionally limited to --rev
        def doit(pushedrevs, remoteheads, remote=remote):
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote,
                                                            ancestorsof=nodes)
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts['rev']
    # time the discovery run itself
    with util.timedcm('debug-discovery') as t:
        common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    common = set(common)
    rheads = set(hds)
    lheads = set(repo.heads())

    data = {}
    data['elapsed'] = t.elapsed
    data['nb-common'] = len(common)
    data['nb-common-local'] = len(common & lheads)
    data['nb-common-remote'] = len(common & rheads)
    data['nb-common-both'] = len(common & rheads & lheads)
    data['nb-local'] = len(lheads)
    data['nb-local-missing'] = data['nb-local'] - data['nb-common-local']
    data['nb-remote'] = len(rheads)
    data['nb-remote-unknown'] = data['nb-remote'] - data['nb-common-remote']
    data['nb-revs'] = len(repo.revs('all()'))
    data['nb-revs-common'] = len(repo.revs('::%ln', common))
    data['nb-revs-missing'] = data['nb-revs'] - data['nb-revs-common']

    # display discovery summary
    ui.write(("elapsed time: %(elapsed)f seconds\n") % data)
    ui.write(("heads summary:\n"))
    ui.write((" total common heads: %(nb-common)9d\n") % data)
    ui.write((" also local heads: %(nb-common-local)9d\n") % data)
    ui.write((" also remote heads: %(nb-common-remote)9d\n") % data)
    ui.write((" both: %(nb-common-both)9d\n") % data)
    ui.write((" local heads: %(nb-local)9d\n") % data)
    ui.write((" common: %(nb-common-local)9d\n") % data)
    ui.write((" missing: %(nb-local-missing)9d\n") % data)
    ui.write((" remote heads: %(nb-remote)9d\n") % data)
    ui.write((" common: %(nb-common-remote)9d\n") % data)
    ui.write((" unknown: %(nb-remote-unknown)9d\n") % data)
    ui.write(("local changesets: %(nb-revs)9d\n") % data)
    ui.write((" common: %(nb-revs-common)9d\n") % data)
    ui.write((" missing: %(nb-revs-missing)9d\n") % data)

    if ui.verbose:
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
861 861
# copy granularity (4 KiB) used by debugdownload below
_chunksize = 4 << 10
863 863
@command('debugdownload',
    [
        ('o', 'output', '', _('path')),
    ],
    optionalrepo=True)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config
    """
    fh = urlmod.open(ui, url, output)

    # copy to stdout (the ui) unless -o/--output names a destination file;
    # fixed-size chunks keep memory use bounded for large downloads
    dest = ui
    if output:
        dest = open(output, "wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        # NOTE(review): only the local output file is closed here; the
        # remote handle 'fh' is never explicitly closed -- confirm whether
        # urlmod handles require an explicit close.
        if output:
            dest.close()
885 885
@command('debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = [] # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            # annotate the name with tested-with status for this hg version
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        fm.condwrite(ui.verbose and extsource, 'source',
                     _(' location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _(' tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _(' bug reporting: %s\n'), extbuglink or "")

    fm.end()
931 931
@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')),
     ('', 'all-files', False,
      _('test files from all revisions and working directory')),
     ('s', 'show-matcher', None,
      _('print internal representation of matcher')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME'))],
    _('[-r REV] [--all-files] [OPTION]... FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset
    fileset.symbols # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    # transformation pipeline applied to the parsed tree, in this order
    stages = [
        ('parsed', pycompat.identity),
        ('analyzed', filesetlang.analyze),
        ('optimized', filesetlang.optimize),
    ]
    stagenames = set(n for n, f in stages)

    # stages whose resulting tree should be dumped to the ui
    showalways = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(filesetlang.prettyformat(tree), "\n")

    # gather the candidate files the matcher will be applied to: either
    # every file from every revision (--all-files) or the files touched by
    # the selected revision / present in the working directory
    files = set()
    if opts['all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts['all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(repo.dirstate.walk(scmutil.matchall(repo),
                                        subrepos=list(wctx.substate),
                                        unknown=True, ignored=True))
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(expr)
    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write("%s\n" % f)
998 998
@command('debugformat',
         [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    # name column width: widest variant name, never narrower than the header
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len('format-variant'), maxvariantlength)

    def makeformatname(name):
        # pad each variant name so the value columns line up
        return '%s:' + (' ' * (maxvariantlength - len(name)))

    fm = ui.formatter('debugformat', opts)
    if fm.isplain():
        def formatvalue(value):
            # plain output: strings as-is, booleans rendered as yes/no
            if util.safehasattr(value, 'startswith'):
                return value
            if value:
                return 'yes'
            else:
                return 'no'
    else:
        formatvalue = pycompat.identity

    # header row
    fm.plain('format-variant')
    fm.plain(' ' * (maxvariantlength - len('format-variant')))
    fm.plain(' repo')
    if ui.verbose:
        fm.plain(' config default')
    fm.plain('\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # color the name/repo cells according to whether the repository
        # value matches the current config and the Mercurial default
        if repovalue != configvalue:
            namelabel = 'formatvariant.name.mismatchconfig'
            repolabel = 'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = 'formatvariant.name.mismatchdefault'
            repolabel = 'formatvariant.repo.mismatchdefault'
        else:
            namelabel = 'formatvariant.name.uptodate'
            repolabel = 'formatvariant.repo.uptodate'

        fm.write('name', makeformatname(fv.name), fv.name,
                 label=namelabel)
        fm.write('repo', ' %3s', formatvalue(repovalue),
                 label=repolabel)
        if fv.default != configvalue:
            configlabel = 'formatvariant.config.special'
        else:
            configlabel = 'formatvariant.config.default'
        fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
                     label=configlabel)
        fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
                     label='formatvariant.default')
        fm.plain('\n')
    fm.end()
1060 1060
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    def yesno(flag):
        # render a boolean filesystem probe the way this command reports it
        return flag and 'yes' or 'no'

    ui.write(('path: %s\n') % path)
    mountpoint = util.getfsmountpoint(path) or '(unknown)'
    ui.write(('mounted on: %s\n') % mountpoint)
    ui.write(('exec: %s\n') % yesno(util.checkexec(path)))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % yesno(util.checklink(path)))
    ui.write(('hardlink: %s\n') % yesno(util.checknlink(path)))
    # probing case sensitivity requires creating a temporary file; report
    # '(unknown)' when that is not possible (e.g. read-only directory)
    casesensitive = '(unknown)'
    try:
        with pycompat.namedtempfile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = yesno(util.fscasesensitive(f.name))
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)
1077 1077
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
     ('C', 'common', [], _('id of common node'), _('ID')),
     ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    # build the getbundle() keyword arguments from the command line
    args = {}
    if common:
        args[r'common'] = [bin(s) for s in common]
    if head:
        args[r'heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args[r'bundlecaps'] = None
    bundle = repo.getbundle('debug', **args)

    # map the user-facing compression name to an on-disk bundle type header
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1112 1112
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != '.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    # the file itself is not ignored; check whether one of
                    # its parent directories is
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(_("%s is ignored because of "
                               "containing directory %s\n")
                             % (uipathfn(f), ignored))
                # report which ignore file and line produced the match
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % uipathfn(f))
1155 1155
@command('debugindex', cmdutil.debugrevlogopts + cmdutil.formatteropts,
         _('-c|-m|FILE'))
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindex', file_, opts)

    # --debug prints full 40-char hashes, otherwise the short form
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # width of the node id columns: probe the first revision (falls back to
    # 12 for an empty store)
    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break

    fm = ui.formatter('debugindex', opts)
    fm.plain(b'   rev linkrev %s %s p2\n' % (
        b'nodeid'.ljust(idlen),
        b'p1'.ljust(idlen)))

    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', '%7d ', store.linkrev(rev))
        fm.write(b'node', '%s ', shortfn(node))
        fm.write(b'p1', '%s ', shortfn(parents[0]))
        fm.write(b'p2', '%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()
1191 1191
@command('debugindexdot', cmdutil.debugrevlogopts,
         _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for rev in store:
        p1, p2 = store.parents(store.node(rev))
        # the first parent edge is always emitted, even for root revisions
        ui.write("\t%d -> %d\n" % (store.rev(p1), rev))
        if p2 != nullid:
            ui.write("\t%d -> %d\n" % (store.rev(p2), rev))
    ui.write("}\n")
1206 1206
@command('debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # touch the index so the (possibly lazy) native implementation is loaded
    repo.changelog.shortest(nullid, 1)
    index = repo.changelog.index
    if not util.safehasattr(index, 'stats'):
        raise error.Abort(_('debugindexstats only works with native code'))
    stats = index.stats()
    for key in sorted(stats):
        ui.write('%s: %d\n' % (key, stats[key]))
1216 1216
@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    # count of hard failures; warnings do not increment this
    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable or _("unknown"))
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_(' TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_(' SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    # try importing the accelerated extension modules the active module
    # policy requires; an import failure here is a broken install
    rustandc = policy.policy in ('rust+c', 'rust+c-allow')
    rustext = rustandc # for now, that's the only case
    cext = policy.policy in ('c', 'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        err = None
        try:
            if cext:
                from .cext import (
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )
                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (
                    ancestor,
                    dirstate,
                )
                dir(ancestor), dir(dirstate) # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    # compression engines
    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))
    re2 = 'missing'
    if util._re2:
        re2 = 'available'
    fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editorbin)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editorbin)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = stringutil.forcebytestr(e)
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    # give extensions a chance to run their own installation checks
    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems
1401 1401
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('known'):
        raise error.Abort("known() not supported by target repository")
    nodes = [bin(s) for s in ids]
    # render one digit per queried node, in query order
    bits = ["1" if known else "0" for known in peer.known(nodes)]
    ui.write("%s\n" % "".join(bits))
1415 1415
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # thin alias kept so ancient completion scripts keep working; the real
    # implementation is debugnamecomplete
    debugnamecomplete(ui, repo, *args)
1420 1420
@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)')),
          ('s', 'set-lock', None, _('set the store lock until stopped')),
          ('S', 'set-wlock', None,
           _('set the working state lock until stopped'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # --force-lock/--force-wlock: unconditionally remove the lock files
    if opts.get(r'force_lock'):
        repo.svfs.unlink('lock')
    if opts.get(r'force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
        return 0

    # --set-lock/--set-wlock: acquire the lock(s) non-blockingly and hold
    # them until the user (or a signal) tells us to let go
    locks = []
    try:
        if opts.get(r'set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_('wlock is already held'))
        if opts.get(r'set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_('lock is already held'))
        if len(locks):
            ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    # default mode: report the state of both locks
    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # we were able to take the lock ourselves, so it was free
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = ('user %s, process %s, host %s'
                                  % (user or b'None', pid, host))
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                # ENOENT means the lock file vanished between the probe and
                # lstat: treat as free
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held
1517 1517
@command('debugmanifestfulltextcache', [
        ('', 'clear', False, _('clear the cache')),
        ('a', 'add', [], _('add the given manifest nodes to the cache'),
         _('NODE'))
    ], '')
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        # only some revlog implementations maintain a fulltext cache
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _("Current revlog implementation doesn't appear to have a "
                    "manifest fulltext cache\n")
            raise error.Abort(msg)

    if opts.get(r'clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
        return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(e, hint="Check your manifest node id")
                manifest.read() # stores revision in cache too
        return

    # no option given: dump the cache contents
    cache = getcache()
    if not len(cache):
        ui.write(_('cache empty\n'))
    else:
        ui.write(
            _('cache contains %d manifest entries, in order of most to '
              'least recent:\n') % (len(cache),))
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24 # 20 bytes nodeid, 4 bytes size
            ui.write(_('id: %s, size %s\n') % (
                hex(nodeid), util.bytecount(size)))
        ondisk = cache._opener.stat('manifestfulltextcache').st_size
        ui.write(
            _('total cache data size %s, on-disk %s\n') % (
                util.bytecount(totalsize), util.bytecount(ondisk))
        )
1573 1573
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        # render the null hash as the literal string 'null'
        if h == nullhex:
            return 'null'
        else:
            return h

    def printrecords(version):
        # dump the raw records of the given merge state format version
        ui.write(('* version %d records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                # per-file record: NUL-separated fields; v2 stores two extra
                # fields (other node and flags) compared to v1
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write(('  ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write(('  other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                # per-file extras: filename followed by key/value pairs
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write(('  local: %s\n' % labels[0]))
                ui.write(('  other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write(('  base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        # known record types first (in 'LOml' order), then the rest
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
        if ui.verbose:
            printrecords(2)
1672 1672
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    candidates = set()
    # the 'branches' namespace is handled separately below because
    # historically only open branches were listed
    for nsname, ns in repo.names.iteritems():
        if nsname == 'branches':
            continue
        candidates.update(ns.listnames(repo))
    for tag, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            candidates.add(tag)
    # an empty argument list means "complete everything"
    prefixes = args or ['']
    completions = set()
    for prefix in prefixes:
        completions.update(n for n in candidates if n.startswith(prefix))
    ui.write('\n'.join(sorted(completions)))
    ui.write('\n')
1692 1692
@command('debugobsolete',
         [('', 'flags', 0, _('markers flag')),
          ('', 'record-parents', False,
           _('record parent information for the precursor')),
          ('r', 'rev', [], _('display markers relevant to REV')),
          ('', 'exclusive', False, _('restrict display to markers only '
                                     'relevant to REV')),
          ('', 'index', False, _('display index of the marker')),
          ('', 'delete', [], _('delete markers specified by indices')),
         ] + cmdutil.commitopts2 + cmdutil.formatteropts,
         _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        # Convert a full hex node string to binary, aborting on anything
        # that is not exactly a full-length node identifier.
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    # Deletion mode: --delete takes a list of marker indices to remove.
    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        # Creation mode: record a marker obsoleting 'precursor' with the
        # given successors, under a lock and transaction.
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = encoding.fromlocal(opts['user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    # Recording parents requires the precursor to be known
                    # locally (parents are read from the repository).
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot used --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata, ui=ui)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') %
                                  pycompat.bytestr(exc))
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # Display mode: list markers, optionally restricted to --rev.
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsutil.getmarkers(repo, nodes=nodes,
                                              exclusive=opts['exclusive']))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
1809 1809
@command('debugp1copies',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV]'))
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    # Emit one "source -> destination" line per recorded copy.
    copymap = ctx.p1copies()
    for dst in copymap:
        ui.write('%s -> %s\n' % (copymap[dst], dst))
1820 1820
@command('debugp2copies',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV]'))
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""
    # NOTE: this function was previously (mis)named debugp1copies, which
    # shadowed the module-level attribute of the real debugp1copies
    # command defined just above. The registered command name has always
    # been 'debugp2copies', so `hg debugp2copies` behaves the same.

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    # Emit one "source -> destination" line per copy recorded against p2.
    for dst, src in ctx.p2copies().items():
        ui.write('%s -> %s\n' % (src, dst))
1831 1831
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        # Return (files, dirs) from the dirstate matching the given path
        # prefix, restricted to entries whose state is in 'acceptable'.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        # Refuse anything outside the repository root.
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        # The dirstate stores '/'-separated paths; on platforms with a
        # different separator, translate in both directions.
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts[r'full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                # Without --full, stop completion at the next separator.
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # Build the set of acceptable dirstate states from the flags; an
    # empty string falls back to all states ('nmar') below.
    acceptable = ''
    if opts[r'normal']:
        acceptable += 'nm'
    if opts[r'added']:
        acceptable += 'a'
    if opts[r'removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')
1896 1896
@command('debugpathcopies',
         cmdutil.walkopts,
         'hg debugpathcopies REV1 REV2 [FILE]',
         inferrepo=True)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    srcctx = scmutil.revsingle(repo, rev1)
    dstctx = scmutil.revsingle(repo, rev2)
    matcher = scmutil.match(srcctx, pats, opts)
    # pathcopies maps destination -> source; print sorted by destination.
    copymap = copies.pathcopies(srcctx, dstctx, matcher)
    for dst in sorted(copymap):
        ui.write('%s -> %s\n' % (copymap[dst], dst))
1908 1908
@command('debugpeer', [], _('PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Force peer-request logging on; the messages only show up when
    # --debug is in effect.
    cfg = {('devel', 'debug.peer-request'): True}

    with ui.configoverride(cfg):
        peer = hg.peer(ui, {}, path)

        islocal = peer.local() is not None
        pushable = peer.canpush()

        ui.write(_('url: %s\n') % peer.url())
        if islocal:
            ui.write(_('local: %s\n') % _('yes'))
        else:
            ui.write(_('local: %s\n') % _('no'))
        if pushable:
            ui.write(_('pushable: %s\n') % _('yes'))
        else:
            ui.write(_('pushable: %s\n') % _('no'))
1927 1927
@command('debugpickmergetool',
         [('r', 'rev', '', _('check for files in this revision'), _('REV')),
          ('', 'changedelete', None, _('emulate merging change and delete')),
         ] + cmdutil.walkopts + cmdutil.mergetoolopts,
         _('[PATTERN]...'),
         inferrepo=True)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts['tool']:
        # --tool is applied via a temporary config override, mirroring
        # how the real merge machinery honors it.
        overrides[('ui', 'forcemerge')] = opts['tool']
        ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))

    with ui.configoverride(overrides, 'debugmergepatterns'):
        hgmerge = encoding.environ.get("HGMERGE")
        if hgmerge is not None:
            ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config("ui", "merge")
        if uimerge:
            ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get('rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts['changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                # Unless --debug is set, buffer _picktool's warning output
                # so that only the FILE = MERGETOOL lines are printed.
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(repo, ui, path,
                                                     fctx.isbinary(),
                                                     'l' in fctx.flags(),
                                                     changedelete)
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(('%s = %s\n') % (path, tool))
2006 2006
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if not keyinfo:
        # No key given: dump every key/value pair in the namespace.
        for k, v in sorted(target.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (stringutil.escapestr(k),
                                   stringutil.escapestr(v)))
        return

    # Five-argument form: conditional update of a single key.
    key, old, new = keyinfo
    args = {
        'namespace': namespace,
        'key': key,
        'old': old,
        'new': new,
    }
    with target.commandexecutor() as e:
        r = e.callcommand('pushkey', args).result()

    ui.status(pycompat.bytestr(r) + '\n')
    return not r
2034 2034
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    """display parent-vector ("pvec") comparison of two revisions

    Prints both pvecs, their depths, and the computed delta, hamming
    distance, distance and relation ('=', '>', '<', '|', or '?').
    """
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    else:
        # Fallback: without this branch, 'rel' would be unbound when no
        # comparison above matches, and the final ui.write would raise
        # UnboundLocalError instead of reporting anything useful.
        rel = "?"
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))
2055 2055
@command('debugrebuilddirstate|debugrebuildstate',
         [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
          ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                                  'the working copy parent')),
         ],
         _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        ds = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get(r'minimal'):
            inmanifest = set(ctx.manifest().keys())
            indirstate = set(ds)
            # Files only the manifest knows about ...
            manifestonly = inmanifest - indirstate
            # ... plus dirstate-only files not marked as added.
            dsonly = indirstate - inmanifest
            dsnotadded = {f for f in dsonly if ds[f] != 'a'}
            changedfiles = manifestonly | dsnotadded

        ds.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2093 2093
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # Thin wrapper: all of the work happens in repair.rebuildfncache.
    repair.rebuildfncache(ui, repo)
2098 2098
@command('debugrename',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV] [FILE]...'))
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, pats, opts)
    for abspath in ctx.walk(matcher):
        fctx = ctx[abspath]
        relpath = repo.pathto(abspath)
        # renamed() returns (source path, source node) or False/None.
        renameinfo = fctx.filelog().renamed(fctx.filenode())
        if not renameinfo:
            ui.write(_("%s not renamed\n") % relpath)
            continue
        srcpath, srcnode = renameinfo
        ui.write(_("%s renamed from %s:%s\n") % (relpath, srcpath,
                                                 hex(srcnode)))
2116 2116
@command('debugrevlog', cmdutil.debugrevlogopts +
         [('d', 'dump', False, _('dump index data'))],
         _('-c|-m|FILE'),
         optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    if opts.get("dump"):
        # Raw dump mode: one line of index data per revision, then return.
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
                  " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            # running total of raw (uncompressed) sizes so far
            ts = ts + rs
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    # Statistics mode: decode the revlog version/flags first.
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append('inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks ways the "delta" are build
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about delta chain of each revs
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision; each accumulator is [min, max, total]
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        # Fold 'size' into a [min, max, total] accumulator in place.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # No delta base: full snapshot (or empty text).
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            # Delta revision: extend the base's chain bookkeeping.
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                # Classify which revision the delta is computed against.
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, '_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            chunktype = bytes(segment[0:1])
        else:
            chunktype = 'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # Convert the 'total' slots to averages before display.
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    if numfull == 0:
        fullsize[2] = 0
    else:
        fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    # Format-string templates padded to the widest value displayed.
    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        # Return (value, percentage-of-total) for the pcfmtstr templates.
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.write(('format : %d\n') % format)
    ui.write(('flags : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' empty : ') + fmt % pcfmt(numempty, numrevs))
    ui.write((' text : ')
             + fmt % pcfmt(numemptytext, numemptytext + numemptydelta))
    ui.write((' delta : ')
             + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta))
    ui.write((' snapshot : ') + fmt % pcfmt(numfull + numsemi, numrevs))
    for depth in sorted(numsnapdepth):
        ui.write((' lvl-%-3d : ' % depth)
                 + fmt % pcfmt(numsnapdepth[depth], numrevs))
    ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write((' snapshot : ')
             + fmt % pcfmt(fulltotal + semitotal, totalsize))
    for depth in sorted(numsnapdepth):
        ui.write((' lvl-%-3d : ' % depth)
                 + fmt % pcfmt(snaptotal[depth], totalsize))
    ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        # Render the chunk-type label; printable types get both forms.
        if chunktype == 'empty':
            return ' %s : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return ' 0x%s : ' % hex(chunktype)

    ui.write('\n')
    ui.write(('chunks : ') + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.write(('chunks size : ') + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.write(('avg chain length : ') + fmt % avgchainlen)
    ui.write(('max chain length : ') + fmt % maxchainlen)
    ui.write(('max chain reach : ') + fmt % maxchainspan)
    ui.write(('compression ratio : ') + fmt % compratio)

    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('inter-snapshot size (min/max/avg) : %d / %d / %d\n')
                 % tuple(semisize))
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.write((' level-%-3d (min/max/avg) : %d / %d / %d\n')
                     % ((depth,) + tuple(snapsizedepth[depth])))
        ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
                                                            numprev))
            ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
                                                            numprev))
            ui.write((' other : ') + fmt2 % pcfmt(numoprev,
                                                  numprev))
        if gdelta:
            ui.write(('deltas against p1 : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2 : ')
                     + fmt % pcfmt(nump2, numdeltas))
            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                               numdeltas))
2419 2419
@command('debugrevlogindex', cmdutil.debugrevlogopts +
         [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
         _('[-f FORMAT] -c|-m|FILE'),
         optionalrepo=True)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlogindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    # Full-length hex node ids with --debug, short forms otherwise.
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    # Emit the column header matching the chosen format and verbosity.
    if format == 0:
        if ui.verbose:
            ui.write((" rev offset length linkrev"
                      " %s %s p2\n") % ("nodeid".ljust(idlen),
                                        "p1".ljust(idlen)))
        else:
            ui.write((" rev linkrev %s %s p2\n") % (
                "nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        if ui.verbose:
            ui.write((" rev flag offset length size link p1"
                      " p2 %s\n") % "nodeid".rjust(idlen))
        else:
            ui.write((" rev flag size link p1 p2 %s\n") %
                     "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # Fall back to null parents if the lookup fails.
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
            else:
                ui.write("% 6d % 7d %s %s %s\n" % (
                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
                    shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    r.linkrev(i), pr[0], pr[1], shortfn(node)))
            else:
                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
                    shortfn(node)))
2484 2484
2485 2485 @command('debugrevspec',
2486 2486 [('', 'optimize', None,
2487 2487 _('print parsed tree after optimizing (DEPRECATED)')),
2488 2488 ('', 'show-revs', True, _('print list of result revisions (default)')),
2489 2489 ('s', 'show-set', None, _('print internal representation of result set')),
2490 2490 ('p', 'show-stage', [],
2491 2491 _('print parsed tree at the given stage'), _('NAME')),
2492 2492 ('', 'no-optimized', False, _('evaluate tree without optimization')),
2493 2493 ('', 'verify-optimized', False, _('verify optimized result')),
2494 2494 ],
2495 2495 ('REVSPEC'))
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems('revsetalias')
    # Ordered pipeline of (stage name, transform) applied to the parsed tree.
    # Each stage consumes the previous stage's output.
    stages = [
        ('parsed', lambda tree: tree),
        ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
                                                           ui.warn)),
        ('concatenated', revsetlang.foldconcat),
        ('analyzed', revsetlang.analyze),
        ('optimized', revsetlang.optimize),
    ]
    if opts['no_optimized']:
        # drop the final optimization stage entirely
        stages = stages[:-1]
    if opts['verify_optimized'] and opts['no_optimized']:
        raise error.Abort(_('cannot use --verify-optimized with '
                            '--no-optimized'))
    stagenames = set(n for n, f in stages)

    # Stages whose tree is always printed vs. printed only when it differs
    # from the previously printed tree.
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
        showchanged.update(['expanded', 'concatenated'])
        if opts['optimize']:
            showalways.add('optimized')
    if opts['show_stage'] and opts['optimize']:
        raise error.Abort(_('cannot use --optimize with --show-stage'))
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    # Run the pipeline, remembering each intermediate tree so the
    # analyzed/optimized pair can be compared later.
    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(revsetlang.prettyformat(tree), "\n")
            printedtree = tree

    if opts['verify_optimized']:
        # Evaluate both the unoptimized and optimized trees and diff the
        # resulting revision sequences; any difference is an optimizer bug.
        arevs = revset.makematcher(treebystage['analyzed'])(repo)
        brevs = revset.makematcher(treebystage['optimized'])(repo)
        if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
            ui.write(("* analyzed set:\n"), stringutil.prettyrepr(arevs), "\n")
            ui.write(("* optimized set:\n"), stringutil.prettyrepr(brevs), "\n")
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        # Emit a unified-diff-style listing of the two revision sequences.
        ui.write(('--- analyzed\n'), label='diff.file_a')
        ui.write(('+++ optimized\n'), label='diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in (r'delete', r'replace'):
                for c in arevs[alo:ahi]:
                    ui.write('-%d\n' % c, label='diff.deleted')
            if tag in (r'insert', r'replace'):
                for c in brevs[blo:bhi]:
                    ui.write('+%d\n' % c, label='diff.inserted')
            if tag == r'equal':
                for c in arevs[alo:ahi]:
                    ui.write(' %d\n' % c)
        return 1

    # Normal path: evaluate the final tree and print the resulting revs.
    func = revset.makematcher(tree)
    revs = func(repo)
    if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
        ui.write(("* set:\n"), stringutil.prettyrepr(revs), "\n")
    if not opts['show_revs']:
        return
    for c in revs:
        ui.write("%d\n" % c)
2587 2587
@command('debugserve', [
    ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
    ('', 'logiofd', '', _('file descriptor to log server I/O to')),
    ('', 'logiofile', '', _('file to log server I/O to')),
    ], '')
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts['sshstdio']:
        raise error.Abort(_('only --sshstdio is currently supported'))

    # Optional file object that will receive a log of all server I/O.
    logfh = None

    if opts['logiofd'] and opts['logiofile']:
        raise error.Abort(_('cannot use both --logiofd and --logiofile'))

    if opts['logiofd']:
        # Line buffered because output is line based.
        try:
            logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts['logiofd']), r'wb', 1)
    elif opts['logiofile']:
        logfh = open(opts['logiofile'], 'ab', 1)

    # Serve the SSH wire protocol over this process's stdio; blocks until
    # the client disconnects.
    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()
2624 2624
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """

    # Resolve both revisions up front, before taking the working-copy lock.
    # An omitted second revision defaults to the null revision.
    p1node = scmutil.revsingle(repo, rev1).node()
    p2node = scmutil.revsingle(repo, rev2, 'null').node()

    with repo.wlock():
        repo.setparents(p1node, p2node)
2642 2642
@command('debugssl', [], '[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    '''test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    '''
    if not pycompat.iswindows:
        raise error.Abort(_('certificate chain building is only possible on '
                            'Windows'))

    if not source:
        if not repo:
            raise error.Abort(_("there is no Mercurial repository here, and no "
                                "server specified"))
        source = "default"

    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)

    # Fall back to the scheme's well-known port when the URL has none.
    defaultport = {'https': 443, 'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_("malformed port number in URL"))
    else:
        raise error.Abort(_("only https and ssh connections are supported"))

    # Imported lazily: this module is only importable on Windows, and we
    # have already aborted on other platforms above.
    from . import win32

    # Verification is intentionally disabled (CERT_NONE): we only need the
    # peer's raw certificate so win32 can inspect/build its chain.
    s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
                        cert_reqs=ssl.CERT_NONE, ca_certs=None)

    try:
        s.connect(addr)
        cert = s.getpeercert(True)

        ui.status(_('checking the certificate chain for %s\n') % url.host)

        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_('certificate chain is incomplete, updating... '))

            # Second call with build enabled asks Windows Update to fetch
            # the missing intermediates/root.
            if not win32.checkcertificatechain(cert):
                ui.status(_('failed.\n'))
            else:
                ui.status(_('done.\n'))
        else:
            ui.status(_('full certificate chain is available\n'))
    finally:
        s.close()
2703 2703
@command('debugsub',
         [('r', 'rev', '',
           _('revision to check'), _('REV'))],
         _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    # Resolve the requested revision (working directory when omitted) and
    # dump each subrepository entry as a path/source/revision triple.
    ctx = scmutil.revsingle(repo, rev, None)
    for path in sorted(ctx.substate):
        state = ctx.substate[path]
        ui.write(('path %s\n') % path)
        ui.write((' source %s\n') % state[0])
        ui.write((' revision %s\n') % state[1])
2714 2714
@command('debugsuccessorssets',
         [('', 'closest', False, _('return closest successors sets only'))],
         _('[REV]'))
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    # Renderers for changeset contexts and raw nodes respectively.
    ctx2str = bytes
    node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n'% ctx2str(ctx))
        for succsset in obsutil.successorssets(repo, ctx.node(),
                                               closest=opts[r'closest'],
                                               cache=cache):
            if succsset:
                # one indented, space-separated line per successors set
                ui.write(' ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(' ')
                    ui.write(node2str(node))
            ui.write('\n')
2767 2767
@command('debugtemplate',
         [('r', 'rev', [], _('apply template on changesets'), _('REV')),
          ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
         _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
         optionalrepo=True)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts[r'rev']:
        if repo is None:
            raise error.RepoError(_('there is no Mercurial repository here '
                                    '(.hg not found)'))
        revs = scmutil.revrange(repo, opts[r'rev'])

    # Extra template keywords supplied on the command line via -D KEY=VALUE.
    props = {}
    for d in opts[r'define']:
        try:
            k, v = (e.strip() for e in d.split('=', 1))
            # 'ui' is reserved; an empty key is meaningless.
            if not k or k == 'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_('malformed keyword definition: %s') % d)

    if ui.verbose:
        # Print the raw parse tree and, if alias expansion changed it,
        # the expanded tree as well.
        aliases = ui.configitems('templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), '\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')

    if revs is None:
        # Generic template: render once with the supplied properties.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested changeset.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
2824 2824
@command('debuguigetpass', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
    ], _('[-p TEXT]'), norepo=True)
def debuguigetpass(ui, prompt=''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    # Fix typo: was 'respose'; now matches debuguiprompt's 'response' output.
    ui.write(('response: %s\n') % r)
2832 2832
@command('debuguiprompt', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
    ], _('[-p TEXT]'), norepo=True)
def debuguiprompt(ui, prompt=''):
    """show plain prompt"""
    # Echo whatever the user typed at the plain (non-password) prompt.
    answer = ui.prompt(prompt)
    ui.write(('response: %s\n') % answer)
2840 2840
@command('debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Hold both the working-copy lock and the store lock while rebuilding
    # every cache from scratch.
    with repo.wlock():
        with repo.lock():
            repo.updatecaches(full=True)
2846 2846
@command('debugupgraderepo', [
    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
    ('', 'run', False, _('performs an upgrade')),
    ('', 'backup', True, _('keep the old repository content around')),
    ('', 'manifest', None, _('select the manifest for upgrade')),
    ])
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlog will be upgraded. You can restrict this using flag
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlog but the manifest
    """
    # Forward the revlog-selection flags (e.g. --manifest/--no-manifest)
    # verbatim so upgrade.upgraderepo() can restrict which revlogs to touch.
    return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize,
                               backup=backup, **opts)
2873 2880
@command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
         inferrepo=True)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    # Optional path normalizer: convert OS separators to '/' when
    # ui.slash is set on a platform whose separator differs.
    f = lambda fn: fn
    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
        f = lambda fn: util.normpath(fn)
    # Column widths sized to the longest repo-relative and cwd-relative paths.
    fmt = 'f %%-%ds %%-%ds %%s' % (
        max([len(abs) for abs in items]),
        max([len(repo.pathto(abs)) for abs in items]))
    for abs in items:
        line = fmt % (abs, f(repo.pathto(abs)), m.exact(abs) and 'exact' or '')
        ui.write("%s\n" % line.rstrip())
2894 2901
@command('debugwhyunstable', [], _('REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        # Optional "hash (phase)" list of divergent nodes, with a trailing
        # space so it joins cleanly into the output line below.
        dnodes = ''
        if entry.get('divergentnodes'):
            dnodes = ' '.join('%s (%s)' % (ctx.hex(), ctx.phasestr())
                              for ctx in entry['divergentnodes']) + ' '
        ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
                                    entry['reason'], entry['node']))
2905 2912
@command('debugwireargs',
         [('', 'three', '', 'three'),
          ('', 'four', '', 'four'),
          ('', 'five', '', 'five'),
          ] + cmdutil.remoteopts,
         _('REPO [OPTIONS]... [ONE [TWO]]'),
         norepo=True)
def debugwireargs(ui, repopath, *vals, **opts):
    """exercise wire-protocol argument passing against a peer repository"""
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    # Strip the generic remote options; only the debug flags are forwarded.
    for opt in cmdutil.remoteopts:
        del opts[opt[1]]
    # Keep only options the user actually set.
    args = {}
    for k, v in opts.iteritems():
        if v:
            args[k] = v
    args = pycompat.strkwargs(args)
    # run twice to check that we don't mess up the stream for the next command
    res1 = repo.debugwireargs(*vals, **args)
    res2 = repo.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)
2929 2936
2930 2937 def _parsewirelangblocks(fh):
2931 2938 activeaction = None
2932 2939 blocklines = []
2933 2940 lastindent = 0
2934 2941
2935 2942 for line in fh:
2936 2943 line = line.rstrip()
2937 2944 if not line:
2938 2945 continue
2939 2946
2940 2947 if line.startswith(b'#'):
2941 2948 continue
2942 2949
2943 2950 if not line.startswith(b' '):
2944 2951 # New block. Flush previous one.
2945 2952 if activeaction:
2946 2953 yield activeaction, blocklines
2947 2954
2948 2955 activeaction = line
2949 2956 blocklines = []
2950 2957 lastindent = 0
2951 2958 continue
2952 2959
2953 2960 # Else we start with an indent.
2954 2961
2955 2962 if not activeaction:
2956 2963 raise error.Abort(_('indented line outside of block'))
2957 2964
2958 2965 indent = len(line) - len(line.lstrip())
2959 2966
2960 2967 # If this line is indented more than the last line, concatenate it.
2961 2968 if indent > lastindent and blocklines:
2962 2969 blocklines[-1] += line.lstrip()
2963 2970 else:
2964 2971 blocklines.append(line)
2965 2972 lastindent = indent
2966 2973
2967 2974 # Flush last block.
2968 2975 if activeaction:
2969 2976 yield activeaction, blocklines
2970 2977
2971 2978 @command('debugwireproto',
2972 2979 [
2973 2980 ('', 'localssh', False, _('start an SSH server for this repo')),
2974 2981 ('', 'peer', '', _('construct a specific version of the peer')),
2975 2982 ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
2976 2983 ('', 'nologhandshake', False,
2977 2984 _('do not log I/O related to the peer handshake')),
2978 2985 ] + cmdutil.remoteopts,
2979 2986 _('[PATH]'),
2980 2987 optionalrepo=True)
2981 2988 def debugwireproto(ui, repo, path=None, **opts):
2982 2989 """send wire protocol commands to a server
2983 2990
2984 2991 This command can be used to issue wire protocol commands to remote
2985 2992 peers and to debug the raw data being exchanged.
2986 2993
2987 2994 ``--localssh`` will start an SSH server against the current repository
2988 2995 and connect to that. By default, the connection will perform a handshake
2989 2996 and establish an appropriate peer instance.
2990 2997
2991 2998 ``--peer`` can be used to bypass the handshake protocol and construct a
2992 2999 peer instance using the specified class type. Valid values are ``raw``,
2993 3000 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
2994 3001 raw data payloads and don't support higher-level command actions.
2995 3002
2996 3003 ``--noreadstderr`` can be used to disable automatic reading from stderr
2997 3004 of the peer (for SSH connections only). Disabling automatic reading of
2998 3005 stderr is useful for making output more deterministic.
2999 3006
3000 3007 Commands are issued via a mini language which is specified via stdin.
3001 3008 The language consists of individual actions to perform. An action is
3002 3009 defined by a block. A block is defined as a line with no leading
3003 3010 space followed by 0 or more lines with leading space. Blocks are
3004 3011 effectively a high-level command with additional metadata.
3005 3012
3006 3013 Lines beginning with ``#`` are ignored.
3007 3014
3008 3015 The following sections denote available actions.
3009 3016
3010 3017 raw
3011 3018 ---
3012 3019
3013 3020 Send raw data to the server.
3014 3021
3015 3022 The block payload contains the raw data to send as one atomic send
3016 3023 operation. The data may not actually be delivered in a single system
3017 3024 call: it depends on the abilities of the transport being used.
3018 3025
3019 3026 Each line in the block is de-indented and concatenated. Then, that
3020 3027 value is evaluated as a Python b'' literal. This allows the use of
3021 3028 backslash escaping, etc.
3022 3029
3023 3030 raw+
3024 3031 ----
3025 3032
3026 3033 Behaves like ``raw`` except flushes output afterwards.
3027 3034
3028 3035 command <X>
3029 3036 -----------
3030 3037
3031 3038 Send a request to run a named command, whose name follows the ``command``
3032 3039 string.
3033 3040
3034 3041 Arguments to the command are defined as lines in this block. The format of
3035 3042 each line is ``<key> <value>``. e.g.::
3036 3043
3037 3044 command listkeys
3038 3045 namespace bookmarks
3039 3046
3040 3047 If the value begins with ``eval:``, it will be interpreted as a Python
3041 3048 literal expression. Otherwise values are interpreted as Python b'' literals.
3042 3049 This allows sending complex types and encoding special byte sequences via
3043 3050 backslash escaping.
3044 3051
3045 3052 The following arguments have special meaning:
3046 3053
3047 3054 ``PUSHFILE``
3048 3055 When defined, the *push* mechanism of the peer will be used instead
3049 3056 of the static request-response mechanism and the content of the
3050 3057 file specified in the value of this argument will be sent as the
3051 3058 command payload.
3052 3059
3053 3060 This can be used to submit a local bundle file to the remote.
3054 3061
3055 3062 batchbegin
3056 3063 ----------
3057 3064
3058 3065 Instruct the peer to begin a batched send.
3059 3066
3060 3067 All ``command`` blocks are queued for execution until the next
3061 3068 ``batchsubmit`` block.
3062 3069
3063 3070 batchsubmit
3064 3071 -----------
3065 3072
3066 3073 Submit previously queued ``command`` blocks as a batch request.
3067 3074
3068 3075 This action MUST be paired with a ``batchbegin`` action.
3069 3076
3070 3077 httprequest <method> <path>
3071 3078 ---------------------------
3072 3079
3073 3080 (HTTP peer only)
3074 3081
3075 3082 Send an HTTP request to the peer.
3076 3083
3077 3084 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
3078 3085
3079 3086 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
3080 3087 headers to add to the request. e.g. ``Accept: foo``.
3081 3088
3082 3089 The following arguments are special:
3083 3090
3084 3091 ``BODYFILE``
3085 3092 The content of the file defined as the value to this argument will be
3086 3093 transferred verbatim as the HTTP request body.
3087 3094
3088 3095 ``frame <type> <flags> <payload>``
3089 3096 Send a unified protocol frame as part of the request body.
3090 3097
3091 3098 All frames will be collected and sent as the body to the HTTP
3092 3099 request.
3093 3100
3094 3101 close
3095 3102 -----
3096 3103
3097 3104 Close the connection to the server.
3098 3105
3099 3106 flush
3100 3107 -----
3101 3108
3102 3109 Flush data written to the server.
3103 3110
3104 3111 readavailable
3105 3112 -------------
3106 3113
3107 3114 Close the write end of the connection and read all available data from
3108 3115 the server.
3109 3116
3110 3117 If the connection to the server encompasses multiple pipes, we poll both
3111 3118 pipes and read available data.
3112 3119
3113 3120 readline
3114 3121 --------
3115 3122
3116 3123 Read a line of output from the server. If there are multiple output
3117 3124 pipes, reads only the main pipe.
3118 3125
3119 3126 ereadline
3120 3127 ---------
3121 3128
3122 3129 Like ``readline``, but read from the stderr pipe, if available.
3123 3130
3124 3131 read <X>
3125 3132 --------
3126 3133
3127 3134 ``read()`` N bytes from the server's main output pipe.
3128 3135
3129 3136 eread <X>
3130 3137 ---------
3131 3138
3132 3139 ``read()`` N bytes from the server's stderr pipe, if available.
3133 3140
3134 3141 Specifying Unified Frame-Based Protocol Frames
3135 3142 ----------------------------------------------
3136 3143
3137 3144 It is possible to emit a *Unified Frame-Based Protocol* by using special
3138 3145 syntax.
3139 3146
3140 3147 A frame is composed as a type, flags, and payload. These can be parsed
3141 3148 from a string of the form:
3142 3149
3143 3150 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
3144 3151
3145 3152 ``request-id`` and ``stream-id`` are integers defining the request and
3146 3153 stream identifiers.
3147 3154
3148 3155 ``type`` can be an integer value for the frame type or the string name
3149 3156 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
3150 3157 ``command-name``.
3151 3158
3152 3159 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
3153 3160 components. Each component (and there can be just one) can be an integer
3154 3161 or a flag name for stream flags or frame flags, respectively. Values are
3155 3162 resolved to integers and then bitwise OR'd together.
3156 3163
3157 3164 ``payload`` represents the raw frame payload. If it begins with
3158 3165 ``cbor:``, the following string is evaluated as Python code and the
3159 3166 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
3160 3167 as a Python byte string literal.
3161 3168 """
3162 3169 opts = pycompat.byteskwargs(opts)
3163 3170
3164 3171 if opts['localssh'] and not repo:
3165 3172 raise error.Abort(_('--localssh requires a repository'))
3166 3173
3167 3174 if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
3168 3175 raise error.Abort(_('invalid value for --peer'),
3169 3176 hint=_('valid values are "raw", "ssh1", and "ssh2"'))
3170 3177
3171 3178 if path and opts['localssh']:
3172 3179 raise error.Abort(_('cannot specify --localssh with an explicit '
3173 3180 'path'))
3174 3181
3175 3182 if ui.interactive():
3176 3183 ui.write(_('(waiting for commands on stdin)\n'))
3177 3184
3178 3185 blocks = list(_parsewirelangblocks(ui.fin))
3179 3186
3180 3187 proc = None
3181 3188 stdin = None
3182 3189 stdout = None
3183 3190 stderr = None
3184 3191 opener = None
3185 3192
3186 3193 if opts['localssh']:
3187 3194 # We start the SSH server in its own process so there is process
3188 3195 # separation. This prevents a whole class of potential bugs around
3189 3196 # shared state from interfering with server operation.
3190 3197 args = procutil.hgcmd() + [
3191 3198 '-R', repo.root,
3192 3199 'debugserve', '--sshstdio',
3193 3200 ]
3194 3201 proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
3195 3202 stdin=subprocess.PIPE,
3196 3203 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
3197 3204 bufsize=0)
3198 3205
3199 3206 stdin = proc.stdin
3200 3207 stdout = proc.stdout
3201 3208 stderr = proc.stderr
3202 3209
3203 3210 # We turn the pipes into observers so we can log I/O.
3204 3211 if ui.verbose or opts['peer'] == 'raw':
3205 3212 stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
3206 3213 logdata=True)
3207 3214 stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
3208 3215 logdata=True)
3209 3216 stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
3210 3217 logdata=True)
3211 3218
3212 3219 # --localssh also implies the peer connection settings.
3213 3220
3214 3221 url = 'ssh://localserver'
3215 3222 autoreadstderr = not opts['noreadstderr']
3216 3223
3217 3224 if opts['peer'] == 'ssh1':
3218 3225 ui.write(_('creating ssh peer for wire protocol version 1\n'))
3219 3226 peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
3220 3227 None, autoreadstderr=autoreadstderr)
3221 3228 elif opts['peer'] == 'ssh2':
3222 3229 ui.write(_('creating ssh peer for wire protocol version 2\n'))
3223 3230 peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
3224 3231 None, autoreadstderr=autoreadstderr)
3225 3232 elif opts['peer'] == 'raw':
3226 3233 ui.write(_('using raw connection to peer\n'))
3227 3234 peer = None
3228 3235 else:
3229 3236 ui.write(_('creating ssh peer from handshake results\n'))
3230 3237 peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
3231 3238 autoreadstderr=autoreadstderr)
3232 3239
3233 3240 elif path:
3234 3241 # We bypass hg.peer() so we can proxy the sockets.
3235 3242 # TODO consider not doing this because we skip
3236 3243 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
3237 3244 u = util.url(path)
3238 3245 if u.scheme != 'http':
3239 3246 raise error.Abort(_('only http:// paths are currently supported'))
3240 3247
3241 3248 url, authinfo = u.authinfo()
3242 3249 openerargs = {
3243 3250 r'useragent': b'Mercurial debugwireproto',
3244 3251 }
3245 3252
3246 3253 # Turn pipes/sockets into observers so we can log I/O.
3247 3254 if ui.verbose:
3248 3255 openerargs.update({
3249 3256 r'loggingfh': ui,
3250 3257 r'loggingname': b's',
3251 3258 r'loggingopts': {
3252 3259 r'logdata': True,
3253 3260 r'logdataapis': False,
3254 3261 },
3255 3262 })
3256 3263
3257 3264 if ui.debugflag:
3258 3265 openerargs[r'loggingopts'][r'logdataapis'] = True
3259 3266
3260 3267 # Don't send default headers when in raw mode. This allows us to
3261 3268 # bypass most of the behavior of our URL handling code so we can
3262 3269 # have near complete control over what's sent on the wire.
3263 3270 if opts['peer'] == 'raw':
3264 3271 openerargs[r'sendaccept'] = False
3265 3272
3266 3273 opener = urlmod.opener(ui, authinfo, **openerargs)
3267 3274
3268 3275 if opts['peer'] == 'http2':
3269 3276 ui.write(_('creating http peer for wire protocol version 2\n'))
3270 3277 # We go through makepeer() because we need an API descriptor for
3271 3278 # the peer instance to be useful.
3272 3279 with ui.configoverride({
3273 3280 ('experimental', 'httppeer.advertise-v2'): True}):
3274 3281 if opts['nologhandshake']:
3275 3282 ui.pushbuffer()
3276 3283
3277 3284 peer = httppeer.makepeer(ui, path, opener=opener)
3278 3285
3279 3286 if opts['nologhandshake']:
3280 3287 ui.popbuffer()
3281 3288
3282 3289 if not isinstance(peer, httppeer.httpv2peer):
3283 3290 raise error.Abort(_('could not instantiate HTTP peer for '
3284 3291 'wire protocol version 2'),
3285 3292 hint=_('the server may not have the feature '
3286 3293 'enabled or is not allowing this '
3287 3294 'client version'))
3288 3295
3289 3296 elif opts['peer'] == 'raw':
3290 3297 ui.write(_('using raw connection to peer\n'))
3291 3298 peer = None
3292 3299 elif opts['peer']:
3293 3300 raise error.Abort(_('--peer %s not supported with HTTP peers') %
3294 3301 opts['peer'])
3295 3302 else:
3296 3303 peer = httppeer.makepeer(ui, path, opener=opener)
3297 3304
3298 3305 # We /could/ populate stdin/stdout with sock.makefile()...
3299 3306 else:
3300 3307 raise error.Abort(_('unsupported connection configuration'))
3301 3308
3302 3309 batchedcommands = None
3303 3310
3304 3311 # Now perform actions based on the parsed wire language instructions.
3305 3312 for action, lines in blocks:
3306 3313 if action in ('raw', 'raw+'):
3307 3314 if not stdin:
3308 3315 raise error.Abort(_('cannot call raw/raw+ on this peer'))
3309 3316
3310 3317 # Concatenate the data together.
3311 3318 data = ''.join(l.lstrip() for l in lines)
3312 3319 data = stringutil.unescapestr(data)
3313 3320 stdin.write(data)
3314 3321
3315 3322 if action == 'raw+':
3316 3323 stdin.flush()
3317 3324 elif action == 'flush':
3318 3325 if not stdin:
3319 3326 raise error.Abort(_('cannot call flush on this peer'))
3320 3327 stdin.flush()
3321 3328 elif action.startswith('command'):
3322 3329 if not peer:
3323 3330 raise error.Abort(_('cannot send commands unless peer instance '
3324 3331 'is available'))
3325 3332
3326 3333 command = action.split(' ', 1)[1]
3327 3334
3328 3335 args = {}
3329 3336 for line in lines:
3330 3337 # We need to allow empty values.
3331 3338 fields = line.lstrip().split(' ', 1)
3332 3339 if len(fields) == 1:
3333 3340 key = fields[0]
3334 3341 value = ''
3335 3342 else:
3336 3343 key, value = fields
3337 3344
3338 3345 if value.startswith('eval:'):
3339 3346 value = stringutil.evalpythonliteral(value[5:])
3340 3347 else:
3341 3348 value = stringutil.unescapestr(value)
3342 3349
3343 3350 args[key] = value
3344 3351
3345 3352 if batchedcommands is not None:
3346 3353 batchedcommands.append((command, args))
3347 3354 continue
3348 3355
3349 3356 ui.status(_('sending %s command\n') % command)
3350 3357
3351 3358 if 'PUSHFILE' in args:
3352 3359 with open(args['PUSHFILE'], r'rb') as fh:
3353 3360 del args['PUSHFILE']
3354 3361 res, output = peer._callpush(command, fh,
3355 3362 **pycompat.strkwargs(args))
3356 3363 ui.status(_('result: %s\n') % stringutil.escapestr(res))
3357 3364 ui.status(_('remote output: %s\n') %
3358 3365 stringutil.escapestr(output))
3359 3366 else:
3360 3367 with peer.commandexecutor() as e:
3361 3368 res = e.callcommand(command, args).result()
3362 3369
3363 3370 if isinstance(res, wireprotov2peer.commandresponse):
3364 3371 val = res.objects()
3365 3372 ui.status(_('response: %s\n') %
3366 3373 stringutil.pprint(val, bprefix=True, indent=2))
3367 3374 else:
3368 3375 ui.status(_('response: %s\n') %
3369 3376 stringutil.pprint(res, bprefix=True, indent=2))
3370 3377
3371 3378 elif action == 'batchbegin':
3372 3379 if batchedcommands is not None:
3373 3380 raise error.Abort(_('nested batchbegin not allowed'))
3374 3381
3375 3382 batchedcommands = []
3376 3383 elif action == 'batchsubmit':
3377 3384 # There is a batching API we could go through. But it would be
3378 3385 # difficult to normalize requests into function calls. It is easier
3379 3386 # to bypass this layer and normalize to commands + args.
3380 3387 ui.status(_('sending batch with %d sub-commands\n') %
3381 3388 len(batchedcommands))
3382 3389 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
3383 3390 ui.status(_('response #%d: %s\n') %
3384 3391 (i, stringutil.escapestr(chunk)))
3385 3392
3386 3393 batchedcommands = None
3387 3394
3388 3395 elif action.startswith('httprequest '):
3389 3396 if not opener:
3390 3397 raise error.Abort(_('cannot use httprequest without an HTTP '
3391 3398 'peer'))
3392 3399
3393 3400 request = action.split(' ', 2)
3394 3401 if len(request) != 3:
3395 3402 raise error.Abort(_('invalid httprequest: expected format is '
3396 3403 '"httprequest <method> <path>'))
3397 3404
3398 3405 method, httppath = request[1:]
3399 3406 headers = {}
3400 3407 body = None
3401 3408 frames = []
3402 3409 for line in lines:
3403 3410 line = line.lstrip()
3404 3411 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
3405 3412 if m:
3406 3413 # Headers need to use native strings.
3407 3414 key = pycompat.strurl(m.group(1))
3408 3415 value = pycompat.strurl(m.group(2))
3409 3416 headers[key] = value
3410 3417 continue
3411 3418
3412 3419 if line.startswith(b'BODYFILE '):
3413 3420 with open(line.split(b' ', 1), 'rb') as fh:
3414 3421 body = fh.read()
3415 3422 elif line.startswith(b'frame '):
3416 3423 frame = wireprotoframing.makeframefromhumanstring(
3417 3424 line[len(b'frame '):])
3418 3425
3419 3426 frames.append(frame)
3420 3427 else:
3421 3428 raise error.Abort(_('unknown argument to httprequest: %s') %
3422 3429 line)
3423 3430
3424 3431 url = path + httppath
3425 3432
3426 3433 if frames:
3427 3434 body = b''.join(bytes(f) for f in frames)
3428 3435
3429 3436 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
3430 3437
3431 3438 # urllib.Request insists on using has_data() as a proxy for
3432 3439 # determining the request method. Override that to use our
3433 3440 # explicitly requested method.
3434 3441 req.get_method = lambda: pycompat.sysstr(method)
3435 3442
3436 3443 try:
3437 3444 res = opener.open(req)
3438 3445 body = res.read()
3439 3446 except util.urlerr.urlerror as e:
3440 3447 # read() method must be called, but only exists in Python 2
3441 3448 getattr(e, 'read', lambda: None)()
3442 3449 continue
3443 3450
3444 3451 ct = res.headers.get(r'Content-Type')
3445 3452 if ct == r'application/mercurial-cbor':
3446 3453 ui.write(_('cbor> %s\n') %
3447 3454 stringutil.pprint(cborutil.decodeall(body),
3448 3455 bprefix=True,
3449 3456 indent=2))
3450 3457
3451 3458 elif action == 'close':
3452 3459 peer.close()
3453 3460 elif action == 'readavailable':
3454 3461 if not stdout or not stderr:
3455 3462 raise error.Abort(_('readavailable not available on this peer'))
3456 3463
3457 3464 stdin.close()
3458 3465 stdout.read()
3459 3466 stderr.read()
3460 3467
3461 3468 elif action == 'readline':
3462 3469 if not stdout:
3463 3470 raise error.Abort(_('readline not available on this peer'))
3464 3471 stdout.readline()
3465 3472 elif action == 'ereadline':
3466 3473 if not stderr:
3467 3474 raise error.Abort(_('ereadline not available on this peer'))
3468 3475 stderr.readline()
3469 3476 elif action.startswith('read '):
3470 3477 count = int(action.split(' ', 1)[1])
3471 3478 if not stdout:
3472 3479 raise error.Abort(_('read not available on this peer'))
3473 3480 stdout.read(count)
3474 3481 elif action.startswith('eread '):
3475 3482 count = int(action.split(' ', 1)[1])
3476 3483 if not stderr:
3477 3484 raise error.Abort(_('eread not available on this peer'))
3478 3485 stderr.read(count)
3479 3486 else:
3480 3487 raise error.Abort(_('unknown action: %s') % action)
3481 3488
3482 3489 if batchedcommands is not None:
3483 3490 raise error.Abort(_('unclosed "batchbegin" request'))
3484 3491
3485 3492 if peer:
3486 3493 peer.close()
3487 3494
3488 3495 if proc:
3489 3496 proc.kill()
@@ -1,1038 +1,1056 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 changelog,
15 15 error,
16 16 filelog,
17 17 hg,
18 18 localrepo,
19 19 manifest,
20 20 pycompat,
21 21 revlog,
22 22 scmutil,
23 23 util,
24 24 vfs as vfsmod,
25 25 )
26 26
27 27 from .utils import (
28 28 compression,
29 29 )
30 30
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # Both requirements appeared in Mercurial 0.9.2; anything predating
    # them cannot be upgraded in place.
    return {'revlogv1', 'store'}
43 43
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blockers = set()
    # The upgrade code does not yet support these experimental features.
    # This is an artificial limitation.
    blockers.add('treemanifest')
    # This was a precursor to generaldelta and was never enabled by default.
    # It should (hopefully) not exist in the wild.
    blockers.add('parentdelta')
    # Upgrade should operate on the actual store, not the shared link.
    blockers.add('shared')
    return blockers
60 60
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    supported = {localrepo.SPARSEREVLOG_REQUIREMENT}
    # Any usable revlog compression engine contributes its requirement(s).
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == 'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
78 78
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
        localrepo.SPARSEREVLOG_REQUIREMENT,
    }
    # Any usable revlog compression engine contributes its requirement(s).
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == 'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
102 102
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    supported = {
        'dotencode',
        'fncache',
        'generaldelta',
        localrepo.SPARSEREVLOG_REQUIREMENT,
    }
    # Any usable revlog compression engine contributes its requirement(s).
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == 'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
126 126
def preservedrequirements(repo):
    """Requirements carried over unconditionally (currently none)."""
    return set()
129 129
# Improvement categories: a "deficiency" is a problem with the current
# repository, an "optimization" is an optional extra action.
deficiency = 'deficiency'
optimisation = 'optimization'
132 132
class improvement(object):
    """One improvement that an upgrade can apply to a repository.

    Instances carry the following attributes:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.

    Equality and hashing are based solely on ``name``.
    """
    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

    def __eq__(self, other):
        if isinstance(other, improvement):
            # identity of an improvement is its name only
            return self.name == other.name
        # Let the other operand try, per the Python data model.
        return NotImplemented

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.name)
174 174
# Registry of all format variant classes, in registration order.
allformatvariant = []

def registerformatvariant(cls):
    # Class decorator: record ``cls`` in ``allformatvariant`` and return it
    # unchanged so it still behaves as a normal class definition.
    allformatvariant.append(cls)
    return cls
180 180
class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""
    type = deficiency
    ### The following attributes should be defined for each class:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it. For ``deficiency`` types, should be
    # worded in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    def __init__(self):
        # subclasses are used as namespaces of class attributes and
        # class/static methods; they are never instantiated.
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()
214 214
class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variant are controlled by a 'requirement'. We define a small
    subclass to factor the code.
    """

    # the requirement that control this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        # requirements that a brand-new repository would be created with,
        # given the current configuration
        return localrepo.newreporequirements(
            ui, localrepo.defaultcreateopts(ui))

    @classmethod
    def fromrepo(cls, repo):
        # the variant is "on" iff the requirement is present in the repo
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        # the variant is "on" by config iff a fresh repo would get it
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo.ui)
239 239
@registerformatvariant
class fncache(requirementformatvariant):
    # format variant tracking the 'fncache' requirement
    name = 'fncache'

    _requirement = 'fncache'

    default = True

    description = _('long and reserved filenames may not work correctly; '
                    'repository performance is sub-optimal')

    upgrademessage = _('repository will be more resilient to storing '
                       'certain paths and performance of certain '
                       'operations should be improved')
254 254
@registerformatvariant
class dotencode(requirementformatvariant):
    # format variant tracking the 'dotencode' requirement
    name = 'dotencode'

    _requirement = 'dotencode'

    default = True

    description = _('storage of filenames beginning with a period or '
                    'space may not work correctly')

    upgrademessage = _('repository will be better able to store files '
                       'beginning with a space or period')
268 268
@registerformatvariant
class generaldelta(requirementformatvariant):
    # format variant tracking the 'generaldelta' requirement
    name = 'generaldelta'

    _requirement = 'generaldelta'

    default = True

    description = _('deltas within internal storage are unable to '
                    'choose optimal revisions; repository is larger and '
                    'slower than it could be; interaction with other '
                    'repositories may require extra network and CPU '
                    'resources, making "hg push" and "hg pull" slower')

    upgrademessage = _('repository storage will be able to create '
                       'optimal deltas; new repository data will be '
                       'smaller and read times should decrease; '
                       'interacting with other repositories using this '
                       'storage model should require less network and '
                       'CPU resources, making "hg push" and "hg pull" '
                       'faster')
290 290
@registerformatvariant
class sparserevlog(requirementformatvariant):
    # format variant tracking the sparse-revlog requirement
    name = 'sparserevlog'

    _requirement = localrepo.SPARSEREVLOG_REQUIREMENT

    default = True

    description = _('in order to limit disk reading and memory usage on older '
                    'version, the span of a delta chain from its root to its '
                    'end is limited, whatever the relevant data in this span. '
                    'This can severly limit Mercurial ability to build good '
                    'chain of delta resulting is much more storage space being '
                    'taken and limit reusability of on disk delta during '
                    'exchange.'
                    )

    upgrademessage = _('Revlog supports delta chain with more unused data '
                       'between payload. These gaps will be skipped at read '
                       'time. This allows for better delta chains, making a '
                       'better compression and faster exchange with server.')
312 312
@registerformatvariant
class removecldeltachain(formatvariant):
    name = 'plain-cl-delta'

    default = True

    description = _('changelog storage is using deltas instead of '
                    'raw entries; changelog reading and any '
                    'operation relying on changelog data are slower '
                    'than they could be')

    upgrademessage = _('changelog storage will be reformated to '
                       'store raw entries; changelog reading will be '
                       'faster; changelog size may be reduced')

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 stopped using delta chains in the changelog: every
        # revision should be its own chain base.  Report "plain" only when
        # that holds for all revisions.
        cl = repo.changelog
        chainbase = cl.chainbase
        for rev in cl:
            if chainbase(rev) != rev:
                return False
        return True

    @staticmethod
    def fromconfig(repo):
        # new repositories always use plain (non-delta) changelog entries
        return True
339 339
@registerformatvariant
class compressionengine(formatvariant):
    name = 'compression'
    default = 'zlib'

    description = _('Compresion algorithm used to compress data. '
                    'Some engine are faster than other')

    upgrademessage = _('revlog content will be recompressed with the new '
                       'algorithm.')

    @classmethod
    def fromrepo(cls, repo):
        # we allow multiple compression engine requirement to co-exist because
        # strictly speaking, revlog seems to support mixed compression style.
        #
        # The compression used for new entries will be "the last one"
        compression = 'zlib'
        for req in repo.requirements:
            if req.startswith(('revlog-compression-', 'exp-compression-')):
                compression = req.split('-', 2)[2]
        return compression

    @classmethod
    def fromconfig(cls, repo):
        return repo.ui.config('format', 'revlog-compression')
367 367
@registerformatvariant
class compressionlevel(formatvariant):
    # Reports the configured compression level for the active engine.
    name = 'compression-level'
    default = 'default'

    description = _('compression level')

    upgrademessage = _('revlog content will be recompressed')

    @classmethod
    def _configuredlevel(cls, repo, comp):
        """Return the configured level for compression engine ``comp``.

        Falls back to 'default' when no explicit level is configured or when
        the engine has no level setting.  Shared by ``fromrepo`` and
        ``fromconfig``, which previously duplicated this logic verbatim.
        """
        level = None
        if comp == 'zlib':
            level = repo.ui.configint('storage', 'revlog.zlib.level')
        elif comp == 'zstd':
            level = repo.ui.configint('storage', 'revlog.zstd.level')
        if level is None:
            return 'default'
        # NOTE(review): bytes(level) stringifies the int on Python 2 only;
        # on Python 3 it would yield a zero-filled buffer -- confirm how this
        # module is handled by the py3 source transformer.
        return bytes(level)

    @classmethod
    def fromrepo(cls, repo):
        """current value of the variant in the repository"""
        return cls._configuredlevel(repo, compressionengine.fromrepo(repo))

    @classmethod
    def fromconfig(cls, repo):
        """current value of the variant in the configuration"""
        return cls._configuredlevel(repo, compressionengine.fromconfig(repo))
400 400
def finddeficiencies(repo):
    """returns a list of deficiencies that the repo suffer from"""
    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.
    return [fv for fv in allformatvariant if not fv.fromrepo(repo)]
414 414
# search without '-' to support older form on newer client.
#
# We don't enforce backward compatibility for debug command so this
# might eventually be dropped. However, having to use two different
# forms in script when comparing result is annoying enough to add
# backward compatibility for a while.
legacy_opts_map = {
    'redeltaparent': 're-delta-parent',
    'redeltamultibase': 're-delta-multibase',
    'redeltaall': 're-delta-all',
    'redeltafulladd': 're-delta-fulladd',
}
427 427
def findoptimizations(repo):
    """Determine optimisation that could be used during upgrade"""
    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.
    return [
        improvement(
            name='re-delta-parent',
            type=optimisation,
            description=_('deltas within internal storage will be recalculated to '
                          'choose an optimal base revision where this was not '
                          'already done; the size of the repository may shrink and '
                          'various operations may become faster; the first time '
                          'this optimization is performed could slow down upgrade '
                          'execution considerably; subsequent invocations should '
                          'not run noticeably slower'),
            upgrademessage=_('deltas within internal storage will choose a new '
                             'base revision if needed')),

        improvement(
            name='re-delta-multibase',
            type=optimisation,
            description=_('deltas within internal storage will be recalculated '
                          'against multiple base revision and the smallest '
                          'difference will be used; the size of the repository may '
                          'shrink significantly when there are many merges; this '
                          'optimization will slow down execution in proportion to '
                          'the number of merges in the repository and the amount '
                          'of files in the repository; this slow down should not '
                          'be significant unless there are tens of thousands of '
                          'files and thousands of merges'),
            upgrademessage=_('deltas within internal storage will choose an '
                             'optimal delta by computing deltas against multiple '
                             'parents; may slow down execution time '
                             'significantly')),

        improvement(
            name='re-delta-all',
            type=optimisation,
            description=_('deltas within internal storage will always be '
                          'recalculated without reusing prior deltas; this will '
                          'likely make execution run several times slower; this '
                          'optimization is typically not needed'),
            upgrademessage=_('deltas within internal storage will be fully '
                             'recomputed; this will likely drastically slow down '
                             'execution time')),

        improvement(
            name='re-delta-fulladd',
            type=optimisation,
            description=_('every revision will be re-added as if it was new '
                          'content. It will go through the full storage '
                          'mechanism giving extensions a chance to process it '
                          '(eg. lfs). This is similar to "re-delta-all" but even '
                          'slower since more logic is involved.'),
            upgrademessage=_('each revision will be added as new content to the '
                             'internal storage; this will likely drastically slow '
                             'down execution time, but some extensions might need '
                             'it')),
    ]
489 489
def determineactions(repo, deficiencies, sourcereqs, destreqs):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``finddeficiencies`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    knownreqs = supporteddestrequirements(repo)

    # A deficiency whose name is a known requirement that the destination
    # will not carry is pruned; every other deficiency is scheduled.
    #
    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.
    return [d for d in deficiencies
            if d.name not in knownreqs or d.name in destreqs]
521 521
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith('00manifest.i'):
        # directory part selects the (sub-)manifest tree
        mandir = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=mandir)
    # reverse of "/".join(("data", path + ".i")): strip the "data/"
    # prefix and the ".i" suffix to recover the tracked file name
    return filelog.filelog(repo.svfs, path[5:-2])
535 535
def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    # unwrap any higher-level storage object down to the raw revlog
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    srcvfs = oldrl.opener
    dstvfs = newrl.opener

    # make sure the destination directory exists before copying anything
    dstvfs.makedirs(dstvfs.dirname(newrl.indexfile))

    util.copyfile(srcvfs.join(oldrl.indexfile),
                  dstvfs.join(newrl.indexfile))
    olddata = srcvfs.join(oldrl.datafile)
    if srcvfs.exists(olddata):
        # inline revlogs have no separate data file
        util.copyfile(olddata, dstvfs.join(newrl.datafile))

    # filelogs must be registered in the fncache; changelog and manifest
    # are tracked unconditionally and stay out of it
    if not (unencodedname.endswith('00changelog.i')
            or unencodedname.endswith('00manifest.i')):
        destrepo.svfs.fncache.add(unencodedname)
564 564
# Sentinels used to select which categories of revlogs get a full clone
# during an upgrade (see ``matchrevlog`` and ``_clonerevlogs``).
UPGRADE_CHANGELOG = object()
UPGRADE_MANIFEST = object()
UPGRADE_FILELOG = object()

# Convenience filter selecting every revlog category.
UPGRADE_ALL_REVLOGS = frozenset([UPGRADE_CHANGELOG,
                                 UPGRADE_MANIFEST,
                                 UPGRADE_FILELOG])
572 572
def matchrevlog(revlogfilter, entry):
    """check is a revlog is selected for cloning

    The store entry is checked against the passed filter"""
    if entry.endswith('00changelog.i'):
        category = UPGRADE_CHANGELOG
    elif entry.endswith('00manifest.i'):
        category = UPGRADE_MANIFEST
    else:
        category = UPGRADE_FILELOG
    return category in revlogfilter
582 582
def _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents,
                  revlogs=UPGRADE_ALL_REVLOGS):
    """Copy revlogs between 2 repos.

    Revlogs selected by the ``revlogs`` filter are cloned revision by
    revision (honoring ``deltareuse`` and ``forcedeltabothparents``);
    the others are blindly copied at the file level.  Progress and size
    statistics are reported through ``ui``.
    """
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith('.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(exclusivefiles=True, revisionscount=True,
                              trackedsize=True, storedsize=True)

        revcount += info['revisionscount'] or 0
        datasize = info['storedsize'] or 0
        rawsize = info['trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            # bug fix: the exception was previously constructed but never
            # raised, silently ignoring unknown revlog types
            raise error.ProgrammingError('unknown revlog type')

    if not revcount:
        return

    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
               '%d in changelog)\n') %
             (revcount, frevcount, mrevcount, crevcount))
    ui.write(_('migrating %s in store; %s tracked data\n') % (
             (util.bytecount(srcsize), util.bytecount(srcrawsize))))

    # Used to keep track of progress.
    progress = None
    def oncopiedrevision(rl, rev, node):
        progress.increment()

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith('.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)

        # store.walk yields filelogs first, then manifests, then the
        # changelog; each category transition finalizes the previous
        # category's progress bar and stats.
        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
            ui.write(_('finished migrating %d manifest revisions across %d '
                       'manifests; change in size: %s\n') %
                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))

            ui.write(_('migrating changelog containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (crevcount, util.bytecount(csrcsize),
                      util.bytecount(crawsize)))
            seen.add('c')
            progress = srcrepo.ui.makeprogress(_('changelog revisions'),
                                               total=crevcount)
        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
            ui.write(_('finished migrating %d filelog revisions across %d '
                       'filelogs; change in size: %s\n') %
                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))

            ui.write(_('migrating %d manifests containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (mcount, mrevcount, util.bytecount(msrcsize),
                      util.bytecount(mrawsize)))
            seen.add('m')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(_('manifest revisions'),
                                               total=mrevcount)
        elif 'f' not in seen:
            ui.write(_('migrating %d filelogs containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (fcount, frevcount, util.bytecount(fsrcsize),
                      util.bytecount(frawsize)))
            seen.add('f')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(_('file revisions'),
                                               total=frevcount)

        if matchrevlog(revlogs, unencoded):
            ui.note(_('cloning %d revisions from %s\n')
                    % (len(oldrl), unencoded))
            newrl = _revlogfrompath(dstrepo, unencoded)
            oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
                        deltareuse=deltareuse,
                        forcedeltabothparents=forcedeltabothparents)
        else:
            msg = _('blindly copying %s containing %i revisions\n')
            ui.note(msg % (unencoded, len(oldrl)))
            _copyrevlog(tr, dstrepo, oldrl, unencoded)

            newrl = _revlogfrompath(dstrepo, unencoded)

        info = newrl.storageinfo(storedsize=True)
        datasize = info['storedsize'] or 0

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    progress.complete()

    ui.write(_('finished migrating %d changelog revisions; change in size: '
               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))

    ui.write(_('finished migrating %d total revisions; total change in store '
               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
736 736
737 737 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
738 738 """Determine whether to copy a store file during upgrade.
739 739
740 740 This function is called when migrating store files from ``srcrepo`` to
741 741 ``dstrepo`` as part of upgrading a repository.
742 742
743 743 Args:
744 744 srcrepo: repo we are copying from
745 745 dstrepo: repo we are copying to
746 746 requirements: set of requirements for ``dstrepo``
747 747 path: store file being examined
748 748 mode: the ``ST_MODE`` file type of ``path``
749 749 st: ``stat`` data structure for ``path``
750 750
751 751 Function should return ``True`` if the file is to be copied.
752 752 """
753 753 # Skip revlogs.
754 754 if path.endswith(('.i', '.d')):
755 755 return False
756 756 # Skip transaction related files.
757 757 if path.startswith('undo'):
758 758 return False
759 759 # Only copy regular files.
760 760 if mode != stat.S_IFREG:
761 761 return False
762 762 # Skip other skipped files.
763 763 if path in ('lock', 'fncache'):
764 764 return False
765 765
766 766 return True
767 767
768 768 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
769 769 """Hook point for extensions to perform additional actions during upgrade.
770 770
771 771 This function is called after revlogs and store files have been copied but
772 772 before the new store is swapped into the original location.
773 773 """
774 774
775 775 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions,
776 776 revlogs=UPGRADE_ALL_REVLOGS):
777 777 """Do the low-level work of upgrading a repository.
778 778
779 779 The upgrade is effectively performed as a copy between a source
780 780 repository and a temporary destination repository.
781 781
782 782 The source repository is unmodified for as long as possible so the
783 783 upgrade can abort at any time without causing loss of service for
784 784 readers and without corrupting the source repository.
785 785 """
786 786 assert srcrepo.currentwlock()
787 787 assert dstrepo.currentwlock()
788 788
789 789 ui.write(_('(it is safe to interrupt this process any time before '
790 790 'data migration completes)\n'))
791 791
792 792 if 're-delta-all' in actions:
793 793 deltareuse = revlog.revlog.DELTAREUSENEVER
794 794 elif 're-delta-parent' in actions:
795 795 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
796 796 elif 're-delta-multibase' in actions:
797 797 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
798 798 elif 're-delta-fulladd' in actions:
799 799 deltareuse = revlog.revlog.DELTAREUSEFULLADD
800 800 else:
801 801 deltareuse = revlog.revlog.DELTAREUSEALWAYS
802 802
803 803 with dstrepo.transaction('upgrade') as tr:
804 804 _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
805 805 're-delta-multibase' in actions, revlogs=revlogs)
806 806
807 807 # Now copy other files in the store directory.
808 808 # The sorted() makes execution deterministic.
809 809 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
810 810 if not _filterstorefile(srcrepo, dstrepo, requirements,
811 811 p, kind, st):
812 812 continue
813 813
814 814 srcrepo.ui.write(_('copying %s\n') % p)
815 815 src = srcrepo.store.rawvfs.join(p)
816 816 dst = dstrepo.store.rawvfs.join(p)
817 817 util.copyfile(src, dst, copystat=True)
818 818
819 819 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
820 820
821 821 ui.write(_('data fully migrated to temporary repository\n'))
822 822
823 823 backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
824 824 backupvfs = vfsmod.vfs(backuppath)
825 825
826 826 # Make a backup of requires file first, as it is the first to be modified.
827 827 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
828 828
829 829 # We install an arbitrary requirement that clients must not support
830 830 # as a mechanism to lock out new clients during the data swap. This is
831 831 # better than allowing a client to continue while the repository is in
832 832 # an inconsistent state.
833 833 ui.write(_('marking source repository as being upgraded; clients will be '
834 834 'unable to read from repository\n'))
835 835 scmutil.writerequires(srcrepo.vfs,
836 836 srcrepo.requirements | {'upgradeinprogress'})
837 837
838 838 ui.write(_('starting in-place swap of repository data\n'))
839 839 ui.write(_('replaced files will be backed up at %s\n') %
840 840 backuppath)
841 841
842 842 # Now swap in the new store directory. Doing it as a rename should make
843 843 # the operation nearly instantaneous and atomic (at least in well-behaved
844 844 # environments).
845 845 ui.write(_('replacing store...\n'))
846 846 tstart = util.timer()
847 847 util.rename(srcrepo.spath, backupvfs.join('store'))
848 848 util.rename(dstrepo.spath, srcrepo.spath)
849 849 elapsed = util.timer() - tstart
850 850 ui.write(_('store replacement complete; repository was inconsistent for '
851 851 '%0.1fs\n') % elapsed)
852 852
853 853 # We first write the requirements file. Any new requirements will lock
854 854 # out legacy clients.
855 855 ui.write(_('finalizing requirements file and making repository readable '
856 856 'again\n'))
857 857 scmutil.writerequires(srcrepo.vfs, requirements)
858 858
859 859 # The lock file from the old store won't be removed because nothing has a
860 860 # reference to its new location. So clean it up manually. Alternatively, we
861 861 # could update srcrepo.svfs and other variables to point to the new
862 862 # location. This is simpler.
863 863 backupvfs.unlink('store/lock')
864 864
865 865 return backuppath
866 866
867 def upgraderepo(ui, repo, run=False, optimize=None, backup=True):
867 def upgraderepo(ui, repo, run=False, optimize=None, backup=True,
868 manifest=None):
868 869 """Upgrade a repository in place."""
869 870 if optimize is None:
870 871 optimize = []
871 872 optimize = set(legacy_opts_map.get(o, o) for o in optimize)
872 873 repo = repo.unfiltered()
873 874
875 revlogs = set(UPGRADE_ALL_REVLOGS)
876 specentries = (('m', manifest),)
877 specified = [(y, x) for (y, x) in specentries if x is not None]
878 if specified:
879 # we have some limitation on revlogs to be recloned
880 if any(x for y, x in specified):
881 revlogs = set()
882 for r, enabled in specified:
883 if enabled:
884 if r == 'm':
885 revlogs.add(UPGRADE_MANIFEST)
886 else:
887 # none are enabled
888 for r, __ in specified:
889 if r == 'm':
890 revlogs.discard(UPGRADE_MANIFEST)
891
874 892 # Ensure the repository can be upgraded.
875 893 missingreqs = requiredsourcerequirements(repo) - repo.requirements
876 894 if missingreqs:
877 895 raise error.Abort(_('cannot upgrade repository; requirement '
878 896 'missing: %s') % _(', ').join(sorted(missingreqs)))
879 897
880 898 blockedreqs = blocksourcerequirements(repo) & repo.requirements
881 899 if blockedreqs:
882 900 raise error.Abort(_('cannot upgrade repository; unsupported source '
883 901 'requirement: %s') %
884 902 _(', ').join(sorted(blockedreqs)))
885 903
886 904 # FUTURE there is potentially a need to control the wanted requirements via
887 905 # command arguments or via an extension hook point.
888 906 newreqs = localrepo.newreporequirements(
889 907 repo.ui, localrepo.defaultcreateopts(repo.ui))
890 908 newreqs.update(preservedrequirements(repo))
891 909
892 910 noremovereqs = (repo.requirements - newreqs -
893 911 supportremovedrequirements(repo))
894 912 if noremovereqs:
895 913 raise error.Abort(_('cannot upgrade repository; requirement would be '
896 914 'removed: %s') % _(', ').join(sorted(noremovereqs)))
897 915
898 916 noaddreqs = (newreqs - repo.requirements -
899 917 allowednewrequirements(repo))
900 918 if noaddreqs:
901 919 raise error.Abort(_('cannot upgrade repository; do not support adding '
902 920 'requirement: %s') %
903 921 _(', ').join(sorted(noaddreqs)))
904 922
905 923 unsupportedreqs = newreqs - supporteddestrequirements(repo)
906 924 if unsupportedreqs:
907 925 raise error.Abort(_('cannot upgrade repository; do not support '
908 926 'destination requirement: %s') %
909 927 _(', ').join(sorted(unsupportedreqs)))
910 928
911 929 # Find and validate all improvements that can be made.
912 930 alloptimizations = findoptimizations(repo)
913 931
914 932 # Apply and Validate arguments.
915 933 optimizations = []
916 934 for o in alloptimizations:
917 935 if o.name in optimize:
918 936 optimizations.append(o)
919 937 optimize.discard(o.name)
920 938
921 939 if optimize: # anything left is unknown
922 940 raise error.Abort(_('unknown optimization action requested: %s') %
923 941 ', '.join(sorted(optimize)),
924 942 hint=_('run without arguments to see valid '
925 943 'optimizations'))
926 944
927 945 deficiencies = finddeficiencies(repo)
928 946 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
929 947 actions.extend(o for o in sorted(optimizations)
930 948 # determineactions could have added optimisation
931 949 if o not in actions)
932 950
933 951 def printrequirements():
934 952 ui.write(_('requirements\n'))
935 953 ui.write(_(' preserved: %s\n') %
936 954 _(', ').join(sorted(newreqs & repo.requirements)))
937 955
938 956 if repo.requirements - newreqs:
939 957 ui.write(_(' removed: %s\n') %
940 958 _(', ').join(sorted(repo.requirements - newreqs)))
941 959
942 960 if newreqs - repo.requirements:
943 961 ui.write(_(' added: %s\n') %
944 962 _(', ').join(sorted(newreqs - repo.requirements)))
945 963
946 964 ui.write('\n')
947 965
948 966 def printupgradeactions():
949 967 for a in actions:
950 968 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
951 969
952 970 if not run:
953 971 fromconfig = []
954 972 onlydefault = []
955 973
956 974 for d in deficiencies:
957 975 if d.fromconfig(repo):
958 976 fromconfig.append(d)
959 977 elif d.default:
960 978 onlydefault.append(d)
961 979
962 980 if fromconfig or onlydefault:
963 981
964 982 if fromconfig:
965 983 ui.write(_('repository lacks features recommended by '
966 984 'current config options:\n\n'))
967 985 for i in fromconfig:
968 986 ui.write('%s\n %s\n\n' % (i.name, i.description))
969 987
970 988 if onlydefault:
971 989 ui.write(_('repository lacks features used by the default '
972 990 'config options:\n\n'))
973 991 for i in onlydefault:
974 992 ui.write('%s\n %s\n\n' % (i.name, i.description))
975 993
976 994 ui.write('\n')
977 995 else:
978 996 ui.write(_('(no feature deficiencies found in existing '
979 997 'repository)\n'))
980 998
981 999 ui.write(_('performing an upgrade with "--run" will make the following '
982 1000 'changes:\n\n'))
983 1001
984 1002 printrequirements()
985 1003 printupgradeactions()
986 1004
987 1005 unusedoptimize = [i for i in alloptimizations if i not in actions]
988 1006
989 1007 if unusedoptimize:
990 1008 ui.write(_('additional optimizations are available by specifying '
991 1009 '"--optimize <name>":\n\n'))
992 1010 for i in unusedoptimize:
993 1011 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
994 1012 return
995 1013
996 1014 # Else we're in the run=true case.
997 1015 ui.write(_('upgrade will perform the following actions:\n\n'))
998 1016 printrequirements()
999 1017 printupgradeactions()
1000 1018
1001 1019 upgradeactions = [a.name for a in actions]
1002 1020
1003 1021 ui.write(_('beginning upgrade...\n'))
1004 1022 with repo.wlock(), repo.lock():
1005 1023 ui.write(_('repository locked and read-only\n'))
1006 1024 # Our strategy for upgrading the repository is to create a new,
1007 1025 # temporary repository, write data to it, then do a swap of the
1008 1026 # data. There are less heavyweight ways to do this, but it is easier
1009 1027 # to create a new repo object than to instantiate all the components
1010 1028 # (like the store) separately.
1011 1029 tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
1012 1030 backuppath = None
1013 1031 try:
1014 1032 ui.write(_('creating temporary repository to stage migrated '
1015 1033 'data: %s\n') % tmppath)
1016 1034
1017 1035 # clone ui without using ui.copy because repo.ui is protected
1018 1036 repoui = repo.ui.__class__(repo.ui)
1019 1037 dstrepo = hg.repository(repoui, path=tmppath, create=True)
1020 1038
1021 1039 with dstrepo.wlock(), dstrepo.lock():
1022 1040 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
1023 upgradeactions)
1041 upgradeactions, revlogs=revlogs)
1024 1042 if not (backup or backuppath is None):
1025 1043 ui.write(_('removing old repository content%s\n') % backuppath)
1026 1044 repo.vfs.rmtree(backuppath, forcibly=True)
1027 1045 backuppath = None
1028 1046
1029 1047 finally:
1030 1048 ui.write(_('removing temporary repository %s\n') % tmppath)
1031 1049 repo.vfs.rmtree(tmppath, forcibly=True)
1032 1050
1033 1051 if backuppath:
1034 1052 ui.warn(_('copy of old repository backed up at %s\n') %
1035 1053 backuppath)
1036 1054 ui.warn(_('the old repository will not be deleted; remove '
1037 1055 'it to free up disk space once the upgraded '
1038 1056 'repository is verified\n'))
@@ -1,423 +1,423 b''
1 1 Show all commands except debug commands
2 2 $ hg debugcomplete
3 3 abort
4 4 add
5 5 addremove
6 6 annotate
7 7 archive
8 8 backout
9 9 bisect
10 10 bookmarks
11 11 branch
12 12 branches
13 13 bundle
14 14 cat
15 15 clone
16 16 commit
17 17 config
18 18 continue
19 19 copy
20 20 diff
21 21 export
22 22 files
23 23 forget
24 24 graft
25 25 grep
26 26 heads
27 27 help
28 28 identify
29 29 import
30 30 incoming
31 31 init
32 32 locate
33 33 log
34 34 manifest
35 35 merge
36 36 outgoing
37 37 parents
38 38 paths
39 39 phase
40 40 pull
41 41 push
42 42 recover
43 43 remove
44 44 rename
45 45 resolve
46 46 revert
47 47 rollback
48 48 root
49 49 serve
50 50 shelve
51 51 status
52 52 summary
53 53 tag
54 54 tags
55 55 tip
56 56 unbundle
57 57 unshelve
58 58 update
59 59 verify
60 60 version
61 61
62 62 Show all commands that start with "a"
63 63 $ hg debugcomplete a
64 64 abort
65 65 add
66 66 addremove
67 67 annotate
68 68 archive
69 69
70 70 Do not show debug commands if there are other candidates
71 71 $ hg debugcomplete d
72 72 diff
73 73
74 74 Show debug commands if there are no other candidates
75 75 $ hg debugcomplete debug
76 76 debugancestor
77 77 debugapplystreamclonebundle
78 78 debugbuilddag
79 79 debugbundle
80 80 debugcapabilities
81 81 debugcheckstate
82 82 debugcolor
83 83 debugcommands
84 84 debugcomplete
85 85 debugconfig
86 86 debugcreatestreamclonebundle
87 87 debugdag
88 88 debugdata
89 89 debugdate
90 90 debugdeltachain
91 91 debugdirstate
92 92 debugdiscovery
93 93 debugdownload
94 94 debugextensions
95 95 debugfileset
96 96 debugformat
97 97 debugfsinfo
98 98 debuggetbundle
99 99 debugignore
100 100 debugindex
101 101 debugindexdot
102 102 debugindexstats
103 103 debuginstall
104 104 debugknown
105 105 debuglabelcomplete
106 106 debuglocks
107 107 debugmanifestfulltextcache
108 108 debugmergestate
109 109 debugnamecomplete
110 110 debugobsolete
111 111 debugp1copies
112 112 debugp2copies
113 113 debugpathcomplete
114 114 debugpathcopies
115 115 debugpeer
116 116 debugpickmergetool
117 117 debugpushkey
118 118 debugpvec
119 119 debugrebuilddirstate
120 120 debugrebuildfncache
121 121 debugrename
122 122 debugrevlog
123 123 debugrevlogindex
124 124 debugrevspec
125 125 debugserve
126 126 debugsetparents
127 127 debugssl
128 128 debugsub
129 129 debugsuccessorssets
130 130 debugtemplate
131 131 debuguigetpass
132 132 debuguiprompt
133 133 debugupdatecaches
134 134 debugupgraderepo
135 135 debugwalk
136 136 debugwhyunstable
137 137 debugwireargs
138 138 debugwireproto
139 139
140 140 Do not show the alias of a debug command if there are other candidates
141 141 (this should hide rawcommit)
142 142 $ hg debugcomplete r
143 143 recover
144 144 remove
145 145 rename
146 146 resolve
147 147 revert
148 148 rollback
149 149 root
150 150 Show the alias of a debug command if there are no other candidates
151 151 $ hg debugcomplete rawc
152 152
153 153
154 154 Show the global options
155 155 $ hg debugcomplete --options | sort
156 156 --color
157 157 --config
158 158 --cwd
159 159 --debug
160 160 --debugger
161 161 --encoding
162 162 --encodingmode
163 163 --help
164 164 --hidden
165 165 --noninteractive
166 166 --pager
167 167 --profile
168 168 --quiet
169 169 --repository
170 170 --time
171 171 --traceback
172 172 --verbose
173 173 --version
174 174 -R
175 175 -h
176 176 -q
177 177 -v
178 178 -y
179 179
180 180 Show the options for the "serve" command
181 181 $ hg debugcomplete --options serve | sort
182 182 --accesslog
183 183 --address
184 184 --certificate
185 185 --cmdserver
186 186 --color
187 187 --config
188 188 --cwd
189 189 --daemon
190 190 --daemon-postexec
191 191 --debug
192 192 --debugger
193 193 --encoding
194 194 --encodingmode
195 195 --errorlog
196 196 --help
197 197 --hidden
198 198 --ipv6
199 199 --name
200 200 --noninteractive
201 201 --pager
202 202 --pid-file
203 203 --port
204 204 --prefix
205 205 --print-url
206 206 --profile
207 207 --quiet
208 208 --repository
209 209 --stdio
210 210 --style
211 211 --subrepos
212 212 --templates
213 213 --time
214 214 --traceback
215 215 --verbose
216 216 --version
217 217 --web-conf
218 218 -6
219 219 -A
220 220 -E
221 221 -R
222 222 -S
223 223 -a
224 224 -d
225 225 -h
226 226 -n
227 227 -p
228 228 -q
229 229 -t
230 230 -v
231 231 -y
232 232
233 233 Show an error if we use --options with an ambiguous abbreviation
234 234 $ hg debugcomplete --options s
235 235 hg: command 's' is ambiguous:
236 236 serve shelve showconfig status summary
237 237 [255]
238 238
239 239 Show all commands + options
240 240 $ hg debugcommands
241 241 abort: dry-run
242 242 add: include, exclude, subrepos, dry-run
243 243 addremove: similarity, subrepos, include, exclude, dry-run
244 244 annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, skip, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, include, exclude, template
245 245 archive: no-decode, prefix, rev, type, subrepos, include, exclude
246 246 backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
247 247 bisect: reset, good, bad, skip, extend, command, noupdate
248 248 bookmarks: force, rev, delete, rename, inactive, list, template
249 249 branch: force, clean, rev
250 250 branches: active, closed, rev, template
251 251 bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
252 252 cat: output, rev, decode, include, exclude, template
253 253 clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
254 254 commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
255 255 config: untrusted, edit, local, global, template
256 256 continue: dry-run
257 257 copy: after, force, include, exclude, dry-run
258 258 debugancestor:
259 259 debugapplystreamclonebundle:
260 260 debugbuilddag: mergeable-file, overwritten-file, new-file
261 261 debugbundle: all, part-type, spec
262 262 debugcapabilities:
263 263 debugcheckstate:
264 264 debugcolor: style
265 265 debugcommands:
266 266 debugcomplete: options
267 267 debugcreatestreamclonebundle:
268 268 debugdag: tags, branches, dots, spaces
269 269 debugdata: changelog, manifest, dir
270 270 debugdate: extended
271 271 debugdeltachain: changelog, manifest, dir, template
272 272 debugdirstate: nodates, dates, datesort
273 273 debugdiscovery: old, nonheads, rev, seed, ssh, remotecmd, insecure
274 274 debugdownload: output
275 275 debugextensions: template
276 276 debugfileset: rev, all-files, show-matcher, show-stage
277 277 debugformat: template
278 278 debugfsinfo:
279 279 debuggetbundle: head, common, type
280 280 debugignore:
281 281 debugindex: changelog, manifest, dir, template
282 282 debugindexdot: changelog, manifest, dir
283 283 debugindexstats:
284 284 debuginstall: template
285 285 debugknown:
286 286 debuglabelcomplete:
287 287 debuglocks: force-lock, force-wlock, set-lock, set-wlock
288 288 debugmanifestfulltextcache: clear, add
289 289 debugmergestate:
290 290 debugnamecomplete:
291 291 debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
292 292 debugp1copies: rev
293 293 debugp2copies: rev
294 294 debugpathcomplete: full, normal, added, removed
295 295 debugpathcopies: include, exclude
296 296 debugpeer:
297 297 debugpickmergetool: rev, changedelete, include, exclude, tool
298 298 debugpushkey:
299 299 debugpvec:
300 300 debugrebuilddirstate: rev, minimal
301 301 debugrebuildfncache:
302 302 debugrename: rev
303 303 debugrevlog: changelog, manifest, dir, dump
304 304 debugrevlogindex: changelog, manifest, dir, format
305 305 debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
306 306 debugserve: sshstdio, logiofd, logiofile
307 307 debugsetparents:
308 308 debugssl:
309 309 debugsub: rev
310 310 debugsuccessorssets: closest
311 311 debugtemplate: rev, define
312 312 debuguigetpass: prompt
313 313 debuguiprompt: prompt
314 314 debugupdatecaches:
315 debugupgraderepo: optimize, run, backup
315 debugupgraderepo: optimize, run, backup, manifest
316 316 debugwalk: include, exclude
317 317 debugwhyunstable:
318 318 debugwireargs: three, four, five, ssh, remotecmd, insecure
319 319 debugwireproto: localssh, peer, noreadstderr, nologhandshake, ssh, remotecmd, insecure
320 320 diff: rev, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos
321 321 export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template
322 322 files: rev, print0, include, exclude, template, subrepos
323 323 forget: interactive, include, exclude, dry-run
324 324 graft: rev, base, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run
325 325 grep: print0, all, diff, text, follow, ignore-case, files-with-matches, line-number, rev, all-files, user, date, template, include, exclude
326 326 heads: rev, topo, active, closed, style, template
327 327 help: extension, command, keyword, system
328 328 identify: rev, num, id, branch, tags, bookmarks, ssh, remotecmd, insecure, template
329 329 import: strip, base, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity
330 330 incoming: force, newest-first, bundle, rev, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
331 331 init: ssh, remotecmd, insecure
332 332 locate: rev, print0, fullpath, include, exclude
333 333 log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
334 334 manifest: rev, all, template
335 335 merge: force, rev, preview, abort, tool
336 336 outgoing: force, rev, newest-first, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
337 337 parents: rev, style, template
338 338 paths: template
339 339 phase: public, draft, secret, force, rev
340 340 pull: update, force, rev, bookmark, branch, ssh, remotecmd, insecure
341 341 push: force, rev, bookmark, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
342 342 recover: verify
343 343 remove: after, force, subrepos, include, exclude, dry-run
344 344 rename: after, force, include, exclude, dry-run
345 345 resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
346 346 revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
347 347 rollback: dry-run, force
348 348 root: template
349 349 serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, print-url, subrepos
350 350 shelve: addremove, unknown, cleanup, date, delete, edit, keep, list, message, name, patch, interactive, stat, include, exclude
351 351 status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, terse, copies, print0, rev, change, include, exclude, subrepos, template
352 352 summary: remote
353 353 tag: force, local, rev, remove, edit, message, date, user
354 354 tags: template
355 355 tip: patch, git, style, template
356 356 unbundle: update
357 357 unshelve: abort, continue, interactive, keep, name, tool, date
358 358 update: clean, check, merge, date, rev, tool
359 359 verify: full
360 360 version: template
361 361
362 362 $ hg init a
363 363 $ cd a
364 364 $ echo fee > fee
365 365 $ hg ci -q -Amfee
366 366 $ hg tag fee
367 367 $ mkdir fie
368 368 $ echo dead > fie/dead
369 369 $ echo live > fie/live
370 370 $ hg bookmark fo
371 371 $ hg branch -q fie
372 372 $ hg ci -q -Amfie
373 373 $ echo fo > fo
374 374 $ hg branch -qf default
375 375 $ hg ci -q -Amfo
376 376 $ echo Fum > Fum
377 377 $ hg ci -q -AmFum
378 378 $ hg bookmark Fum
379 379
380 380 Test debugpathcomplete
381 381
382 382 $ hg debugpathcomplete f
383 383 fee
384 384 fie
385 385 fo
386 386 $ hg debugpathcomplete -f f
387 387 fee
388 388 fie/dead
389 389 fie/live
390 390 fo
391 391
392 392 $ hg rm Fum
393 393 $ hg debugpathcomplete -r F
394 394 Fum
395 395
396 396 Test debugnamecomplete
397 397
398 398 $ hg debugnamecomplete
399 399 Fum
400 400 default
401 401 fee
402 402 fie
403 403 fo
404 404 tip
405 405 $ hg debugnamecomplete f
406 406 fee
407 407 fie
408 408 fo
409 409
410 410 Test debuglabelcomplete, a deprecated name for debugnamecomplete that is still
411 411 used for completions in some shells.
412 412
413 413 $ hg debuglabelcomplete
414 414 Fum
415 415 default
416 416 fee
417 417 fie
418 418 fo
419 419 tip
420 420 $ hg debuglabelcomplete f
421 421 fee
422 422 fie
423 423 fo
@@ -1,931 +1,1048 b''
1 1 #require no-reposimplestore
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [extensions]
5 5 > share =
6 6 > EOF
7 7
8 8 store and revlogv1 are required in source
9 9
10 10 $ hg --config format.usestore=false init no-store
11 11 $ hg -R no-store debugupgraderepo
12 12 abort: cannot upgrade repository; requirement missing: store
13 13 [255]
14 14
15 15 $ hg init no-revlogv1
16 16 $ cat > no-revlogv1/.hg/requires << EOF
17 17 > dotencode
18 18 > fncache
19 19 > generaldelta
20 20 > store
21 21 > EOF
22 22
23 23 $ hg -R no-revlogv1 debugupgraderepo
24 24 abort: cannot upgrade repository; requirement missing: revlogv1
25 25 [255]
26 26
27 27 Cannot upgrade shared repositories
28 28
29 29 $ hg init share-parent
30 30 $ hg -q share share-parent share-child
31 31
32 32 $ hg -R share-child debugupgraderepo
33 33 abort: cannot upgrade repository; unsupported source requirement: shared
34 34 [255]
35 35
36 36 Do not yet support upgrading treemanifest repos
37 37
38 38 $ hg --config experimental.treemanifest=true init treemanifest
39 39 $ hg -R treemanifest debugupgraderepo
40 40 abort: cannot upgrade repository; unsupported source requirement: treemanifest
41 41 [255]
42 42
43 43 Cannot add treemanifest requirement during upgrade
44 44
45 45 $ hg init disallowaddedreq
46 46 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
47 47 abort: cannot upgrade repository; do not support adding requirement: treemanifest
48 48 [255]
49 49
50 50 An upgrade of a repository created with recommended settings only suggests optimizations
51 51
52 52 $ hg init empty
53 53 $ cd empty
54 54 $ hg debugformat
55 55 format-variant repo
56 56 fncache: yes
57 57 dotencode: yes
58 58 generaldelta: yes
59 59 sparserevlog: yes
60 60 plain-cl-delta: yes
61 61 compression: zlib
62 62 compression-level: default
63 63 $ hg debugformat --verbose
64 64 format-variant repo config default
65 65 fncache: yes yes yes
66 66 dotencode: yes yes yes
67 67 generaldelta: yes yes yes
68 68 sparserevlog: yes yes yes
69 69 plain-cl-delta: yes yes yes
70 70 compression: zlib zlib zlib
71 71 compression-level: default default default
72 72 $ hg debugformat --verbose --config format.usefncache=no
73 73 format-variant repo config default
74 74 fncache: yes no yes
75 75 dotencode: yes no yes
76 76 generaldelta: yes yes yes
77 77 sparserevlog: yes yes yes
78 78 plain-cl-delta: yes yes yes
79 79 compression: zlib zlib zlib
80 80 compression-level: default default default
81 81 $ hg debugformat --verbose --config format.usefncache=no --color=debug
82 82 format-variant repo config default
83 83 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
84 84 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
85 85 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
86 86 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
87 87 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
88 88 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
89 89 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
90 90 $ hg debugformat -Tjson
91 91 [
92 92 {
93 93 "config": true,
94 94 "default": true,
95 95 "name": "fncache",
96 96 "repo": true
97 97 },
98 98 {
99 99 "config": true,
100 100 "default": true,
101 101 "name": "dotencode",
102 102 "repo": true
103 103 },
104 104 {
105 105 "config": true,
106 106 "default": true,
107 107 "name": "generaldelta",
108 108 "repo": true
109 109 },
110 110 {
111 111 "config": true,
112 112 "default": true,
113 113 "name": "sparserevlog",
114 114 "repo": true
115 115 },
116 116 {
117 117 "config": true,
118 118 "default": true,
119 119 "name": "plain-cl-delta",
120 120 "repo": true
121 121 },
122 122 {
123 123 "config": "zlib",
124 124 "default": "zlib",
125 125 "name": "compression",
126 126 "repo": "zlib"
127 127 },
128 128 {
129 129 "config": "default",
130 130 "default": "default",
131 131 "name": "compression-level",
132 132 "repo": "default"
133 133 }
134 134 ]
135 135 $ hg debugupgraderepo
136 136 (no feature deficiencies found in existing repository)
137 137 performing an upgrade with "--run" will make the following changes:
138 138
139 139 requirements
140 140 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
141 141
142 142 additional optimizations are available by specifying "--optimize <name>":
143 143
144 144 re-delta-parent
145 145 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
146 146
147 147 re-delta-multibase
148 148 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
149 149
150 150 re-delta-all
151 151 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
152 152
153 153 re-delta-fulladd
154 154 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
155 155
156 156
157 157 --optimize can be used to add optimizations
158 158
159 159 $ hg debugupgrade --optimize redeltaparent
160 160 (no feature deficiencies found in existing repository)
161 161 performing an upgrade with "--run" will make the following changes:
162 162
163 163 requirements
164 164 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
165 165
166 166 re-delta-parent
167 167 deltas within internal storage will choose a new base revision if needed
168 168
169 169 additional optimizations are available by specifying "--optimize <name>":
170 170
171 171 re-delta-multibase
172 172 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
173 173
174 174 re-delta-all
175 175 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
176 176
177 177 re-delta-fulladd
178 178 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
179 179
180 180
181 181 modern form of the option
182 182
183 183 $ hg debugupgrade --optimize re-delta-parent
184 184 (no feature deficiencies found in existing repository)
185 185 performing an upgrade with "--run" will make the following changes:
186 186
187 187 requirements
188 188 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
189 189
190 190 re-delta-parent
191 191 deltas within internal storage will choose a new base revision if needed
192 192
193 193 additional optimizations are available by specifying "--optimize <name>":
194 194
195 195 re-delta-multibase
196 196 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
197 197
198 198 re-delta-all
199 199 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
200 200
201 201 re-delta-fulladd
202 202 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
203 203
204 204
205 205 unknown optimization:
206 206
207 207 $ hg debugupgrade --optimize foobar
208 208 abort: unknown optimization action requested: foobar
209 209 (run without arguments to see valid optimizations)
210 210 [255]
211 211
212 212 Various sub-optimal detections work
213 213
214 214 $ cat > .hg/requires << EOF
215 215 > revlogv1
216 216 > store
217 217 > EOF
218 218
219 219 $ hg debugformat
220 220 format-variant repo
221 221 fncache: no
222 222 dotencode: no
223 223 generaldelta: no
224 224 sparserevlog: no
225 225 plain-cl-delta: yes
226 226 compression: zlib
227 227 compression-level: default
228 228 $ hg debugformat --verbose
229 229 format-variant repo config default
230 230 fncache: no yes yes
231 231 dotencode: no yes yes
232 232 generaldelta: no yes yes
233 233 sparserevlog: no yes yes
234 234 plain-cl-delta: yes yes yes
235 235 compression: zlib zlib zlib
236 236 compression-level: default default default
237 237 $ hg debugformat --verbose --config format.usegeneraldelta=no
238 238 format-variant repo config default
239 239 fncache: no yes yes
240 240 dotencode: no yes yes
241 241 generaldelta: no no yes
242 242 sparserevlog: no no yes
243 243 plain-cl-delta: yes yes yes
244 244 compression: zlib zlib zlib
245 245 compression-level: default default default
246 246 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
247 247 format-variant repo config default
248 248 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
249 249 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
250 250 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
251 251 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
252 252 [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
253 253 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
254 254 [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
255 255 $ hg debugupgraderepo
256 256 repository lacks features recommended by current config options:
257 257
258 258 fncache
259 259 long and reserved filenames may not work correctly; repository performance is sub-optimal
260 260
261 261 dotencode
262 262 storage of filenames beginning with a period or space may not work correctly
263 263
264 264 generaldelta
265 265 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
266 266
267 267 sparserevlog
268 268 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
269 269
270 270
271 271 performing an upgrade with "--run" will make the following changes:
272 272
273 273 requirements
274 274 preserved: revlogv1, store
275 275 added: dotencode, fncache, generaldelta, sparserevlog
276 276
277 277 fncache
278 278 repository will be more resilient to storing certain paths and performance of certain operations should be improved
279 279
280 280 dotencode
281 281 repository will be better able to store files beginning with a space or period
282 282
283 283 generaldelta
284 284 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
285 285
286 286 sparserevlog
287 287 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
288 288
289 289 additional optimizations are available by specifying "--optimize <name>":
290 290
291 291 re-delta-parent
292 292 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
293 293
294 294 re-delta-multibase
295 295 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
296 296
297 297 re-delta-all
298 298 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
299 299
300 300 re-delta-fulladd
301 301 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
302 302
303 303
304 304 $ hg --config format.dotencode=false debugupgraderepo
305 305 repository lacks features recommended by current config options:
306 306
307 307 fncache
308 308 long and reserved filenames may not work correctly; repository performance is sub-optimal
309 309
310 310 generaldelta
311 311 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
312 312
313 313 sparserevlog
314 314 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
315 315
316 316 repository lacks features used by the default config options:
317 317
318 318 dotencode
319 319 storage of filenames beginning with a period or space may not work correctly
320 320
321 321
322 322 performing an upgrade with "--run" will make the following changes:
323 323
324 324 requirements
325 325 preserved: revlogv1, store
326 326 added: fncache, generaldelta, sparserevlog
327 327
328 328 fncache
329 329 repository will be more resilient to storing certain paths and performance of certain operations should be improved
330 330
331 331 generaldelta
332 332 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
333 333
334 334 sparserevlog
335 335 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
336 336
337 337 additional optimizations are available by specifying "--optimize <name>":
338 338
339 339 re-delta-parent
340 340 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
341 341
342 342 re-delta-multibase
343 343 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
344 344
345 345 re-delta-all
346 346 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
347 347
348 348 re-delta-fulladd
349 349 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
350 350
351 351
352 352 $ cd ..
353 353
354 354 Upgrading a repository that is already modern essentially no-ops
355 355
356 356 $ hg init modern
357 357 $ hg -R modern debugupgraderepo --run
358 358 upgrade will perform the following actions:
359 359
360 360 requirements
361 361 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
362 362
363 363 beginning upgrade...
364 364 repository locked and read-only
365 365 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
366 366 (it is safe to interrupt this process any time before data migration completes)
367 367 data fully migrated to temporary repository
368 368 marking source repository as being upgraded; clients will be unable to read from repository
369 369 starting in-place swap of repository data
370 370 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
371 371 replacing store...
372 372 store replacement complete; repository was inconsistent for *s (glob)
373 373 finalizing requirements file and making repository readable again
374 374 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
375 375 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
376 376 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
377 377
378 378 Upgrading a repository to generaldelta works
379 379
380 380 $ hg --config format.usegeneraldelta=false init upgradegd
381 381 $ cd upgradegd
382 382 $ touch f0
383 383 $ hg -q commit -A -m initial
384 384 $ touch f1
385 385 $ hg -q commit -A -m 'add f1'
386 386 $ hg -q up -r 0
387 387 $ touch f2
388 388 $ hg -q commit -A -m 'add f2'
389 389
390 390 $ hg debugupgraderepo --run --config format.sparse-revlog=false
391 391 upgrade will perform the following actions:
392 392
393 393 requirements
394 394 preserved: dotencode, fncache, revlogv1, store
395 395 added: generaldelta
396 396
397 397 generaldelta
398 398 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
399 399
400 400 beginning upgrade...
401 401 repository locked and read-only
402 402 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
403 403 (it is safe to interrupt this process any time before data migration completes)
404 404 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
405 405 migrating 917 bytes in store; 401 bytes tracked data
406 406 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
407 407 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
408 408 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
409 409 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
410 410 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
411 411 finished migrating 3 changelog revisions; change in size: 0 bytes
412 412 finished migrating 9 total revisions; total change in store size: 0 bytes
413 413 copying phaseroots
414 414 data fully migrated to temporary repository
415 415 marking source repository as being upgraded; clients will be unable to read from repository
416 416 starting in-place swap of repository data
417 417 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
418 418 replacing store...
419 419 store replacement complete; repository was inconsistent for *s (glob)
420 420 finalizing requirements file and making repository readable again
421 421 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
422 422 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
423 423 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
424 424
425 425 Original requirements backed up
426 426
427 427 $ cat .hg/upgradebackup.*/requires
428 428 dotencode
429 429 fncache
430 430 revlogv1
431 431 store
432 432
433 433 generaldelta added to original requirements file
434 434
435 435 $ cat .hg/requires
436 436 dotencode
437 437 fncache
438 438 generaldelta
439 439 revlogv1
440 440 store
441 441
442 442 store directory has files we expect
443 443
444 444 $ ls .hg/store
445 445 00changelog.i
446 446 00manifest.i
447 447 data
448 448 fncache
449 449 phaseroots
450 450 undo
451 451 undo.backupfiles
452 452 undo.phaseroots
453 453
454 454 manifest should be generaldelta
455 455
456 456 $ hg debugrevlog -m | grep flags
457 457 flags : inline, generaldelta
458 458
459 459 verify should be happy
460 460
461 461 $ hg verify
462 462 checking changesets
463 463 checking manifests
464 464 crosschecking files in changesets and manifests
465 465 checking files
466 466 checked 3 changesets with 3 changes to 3 files
467 467
468 468 old store should be backed up
469 469
470 470 $ ls -d .hg/upgradebackup.*/
471 471 .hg/upgradebackup.*/ (glob)
472 472 $ ls .hg/upgradebackup.*/store
473 473 00changelog.i
474 474 00manifest.i
475 475 data
476 476 fncache
477 477 phaseroots
478 478 undo
479 479 undo.backup.fncache
480 480 undo.backupfiles
481 481 undo.phaseroots
482 482
483 483 unless --no-backup is passed
484 484
485 485 $ rm -rf .hg/upgradebackup.*/
486 486 $ hg debugupgraderepo --run --no-backup
487 487 upgrade will perform the following actions:
488 488
489 489 requirements
490 490 preserved: dotencode, fncache, generaldelta, revlogv1, store
491 491 added: sparserevlog
492 492
493 493 sparserevlog
494 494 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
495 495
496 496 beginning upgrade...
497 497 repository locked and read-only
498 498 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
499 499 (it is safe to interrupt this process any time before data migration completes)
500 500 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
501 501 migrating 917 bytes in store; 401 bytes tracked data
502 502 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
503 503 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
504 504 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
505 505 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
506 506 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
507 507 finished migrating 3 changelog revisions; change in size: 0 bytes
508 508 finished migrating 9 total revisions; total change in store size: 0 bytes
509 509 copying phaseroots
510 510 data fully migrated to temporary repository
511 511 marking source repository as being upgraded; clients will be unable to read from repository
512 512 starting in-place swap of repository data
513 513 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
514 514 replacing store...
515 515 store replacement complete; repository was inconsistent for * (glob)
516 516 finalizing requirements file and making repository readable again
517 517 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
518 518 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
519 519 $ ls -1 .hg/ | grep upgradebackup
520 520 [1]
521
522 We can restrict optimization to some revlogs:
523
524 $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
525 upgrade will perform the following actions:
526
527 requirements
528 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
529
530 re-delta-parent
531 deltas within internal storage will choose a new base revision if needed
532
533 beginning upgrade...
534 repository locked and read-only
535 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
536 (it is safe to interrupt this process any time before data migration completes)
537 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
538 migrating 917 bytes in store; 401 bytes tracked data
539 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
540 blindly copying data/f0.i containing 1 revisions
541 blindly copying data/f1.i containing 1 revisions
542 blindly copying data/f2.i containing 1 revisions
543 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
544 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
545 cloning 3 revisions from 00manifest.i
546 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
547 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
548 blindly copying 00changelog.i containing 3 revisions
549 finished migrating 3 changelog revisions; change in size: 0 bytes
550 finished migrating 9 total revisions; total change in store size: 0 bytes
551 copying phaseroots
552 data fully migrated to temporary repository
553 marking source repository as being upgraded; clients will be unable to read from repository
554 starting in-place swap of repository data
555 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
556 replacing store...
557 store replacement complete; repository was inconsistent for *s (glob)
558 finalizing requirements file and making repository readable again
559 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
560 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
561
562 Check that the repo still works fine
563
564 $ hg log -G --patch
565 @ changeset: 2:b5a3b78015e5
566 | tag: tip
567 | parent: 0:ba592bf28da2
568 | user: test
569 | date: Thu Jan 01 00:00:00 1970 +0000
570 | summary: add f2
571 |
572 |
573 | o changeset: 1:da8c0fc4833c
574 |/ user: test
575 | date: Thu Jan 01 00:00:00 1970 +0000
576 | summary: add f1
577 |
578 |
579 o changeset: 0:ba592bf28da2
580 user: test
581 date: Thu Jan 01 00:00:00 1970 +0000
582 summary: initial
583
584
585
586 $ hg verify
587 checking changesets
588 checking manifests
589 crosschecking files in changesets and manifests
590 checking files
591 checked 3 changesets with 3 changes to 3 files
592
593 Check we can select negatively
594
595 $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
596 upgrade will perform the following actions:
597
598 requirements
599 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
600
601 re-delta-parent
602 deltas within internal storage will choose a new base revision if needed
603
604 beginning upgrade...
605 repository locked and read-only
606 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
607 (it is safe to interrupt this process any time before data migration completes)
608 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
609 migrating 917 bytes in store; 401 bytes tracked data
610 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
611 cloning 1 revisions from data/f0.i
612 cloning 1 revisions from data/f1.i
613 cloning 1 revisions from data/f2.i
614 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
615 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
616 blindly copying 00manifest.i containing 3 revisions
617 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
618 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
619 cloning 3 revisions from 00changelog.i
620 finished migrating 3 changelog revisions; change in size: 0 bytes
621 finished migrating 9 total revisions; total change in store size: 0 bytes
622 copying phaseroots
623 data fully migrated to temporary repository
624 marking source repository as being upgraded; clients will be unable to read from repository
625 starting in-place swap of repository data
626 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
627 replacing store...
628 store replacement complete; repository was inconsistent for *s (glob)
629 finalizing requirements file and making repository readable again
630 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
631 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
632 $ hg verify
633 checking changesets
634 checking manifests
635 crosschecking files in changesets and manifests
636 checking files
637 checked 3 changesets with 3 changes to 3 files
638
521 639 $ cd ..
522 640
523
524 641 store files with special filenames aren't encoded during copy
525 642
526 643 $ hg init store-filenames
527 644 $ cd store-filenames
528 645 $ touch foo
529 646 $ hg -q commit -A -m initial
530 647 $ touch .hg/store/.XX_special_filename
531 648
532 649 $ hg debugupgraderepo --run
533 650 upgrade will perform the following actions:
534 651
535 652 requirements
536 653 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
537 654
538 655 beginning upgrade...
539 656 repository locked and read-only
540 657 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
541 658 (it is safe to interrupt this process any time before data migration completes)
542 659 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
543 660 migrating 301 bytes in store; 107 bytes tracked data
544 661 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
545 662 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
546 663 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
547 664 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
548 665 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
549 666 finished migrating 1 changelog revisions; change in size: 0 bytes
550 667 finished migrating 3 total revisions; total change in store size: 0 bytes
551 668 copying .XX_special_filename
552 669 copying phaseroots
553 670 data fully migrated to temporary repository
554 671 marking source repository as being upgraded; clients will be unable to read from repository
555 672 starting in-place swap of repository data
556 673 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
557 674 replacing store...
558 675 store replacement complete; repository was inconsistent for *s (glob)
559 676 finalizing requirements file and making repository readable again
560 677 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
561 678 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
562 679 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
563 680 $ hg debugupgraderepo --run --optimize redeltafulladd
564 681 upgrade will perform the following actions:
565 682
566 683 requirements
567 684 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
568 685
569 686 re-delta-fulladd
570 687 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
571 688
572 689 beginning upgrade...
573 690 repository locked and read-only
574 691 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
575 692 (it is safe to interrupt this process any time before data migration completes)
576 693 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
577 694 migrating 301 bytes in store; 107 bytes tracked data
578 695 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
579 696 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
580 697 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
581 698 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
582 699 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
583 700 finished migrating 1 changelog revisions; change in size: 0 bytes
584 701 finished migrating 3 total revisions; total change in store size: 0 bytes
585 702 copying .XX_special_filename
586 703 copying phaseroots
587 704 data fully migrated to temporary repository
588 705 marking source repository as being upgraded; clients will be unable to read from repository
589 706 starting in-place swap of repository data
590 707 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
591 708 replacing store...
592 709 store replacement complete; repository was inconsistent for *s (glob)
593 710 finalizing requirements file and making repository readable again
594 711 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
595 712 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
596 713 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
597 714
598 715 fncache is valid after upgrade
599 716
600 717 $ hg debugrebuildfncache
601 718 fncache already up to date
602 719
603 720 $ cd ..
604 721
605 722 Check upgrading a large file repository
606 723 ---------------------------------------
607 724
608 725 $ hg init largefilesrepo
609 726 $ cat << EOF >> largefilesrepo/.hg/hgrc
610 727 > [extensions]
611 728 > largefiles =
612 729 > EOF
613 730
614 731 $ cd largefilesrepo
615 732 $ touch foo
616 733 $ hg add --large foo
617 734 $ hg -q commit -m initial
618 735 $ cat .hg/requires
619 736 dotencode
620 737 fncache
621 738 generaldelta
622 739 largefiles
623 740 revlogv1
624 741 sparserevlog
625 742 store
626 743
627 744 $ hg debugupgraderepo --run
628 745 upgrade will perform the following actions:
629 746
630 747 requirements
631 748 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
632 749
633 750 beginning upgrade...
634 751 repository locked and read-only
635 752 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
636 753 (it is safe to interrupt this process any time before data migration completes)
637 754 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
638 755 migrating 355 bytes in store; 160 bytes tracked data
639 756 migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
640 757 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
641 758 migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
642 759 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
643 760 migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
644 761 finished migrating 1 changelog revisions; change in size: 0 bytes
645 762 finished migrating 3 total revisions; total change in store size: 0 bytes
646 763 copying phaseroots
647 764 data fully migrated to temporary repository
648 765 marking source repository as being upgraded; clients will be unable to read from repository
649 766 starting in-place swap of repository data
650 767 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
651 768 replacing store...
652 769 store replacement complete; repository was inconsistent for *s (glob)
653 770 finalizing requirements file and making repository readable again
654 771 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
655 772 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
656 773 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
657 774 $ cat .hg/requires
658 775 dotencode
659 776 fncache
660 777 generaldelta
661 778 largefiles
662 779 revlogv1
663 780 sparserevlog
664 781 store
665 782
666 783 $ cat << EOF >> .hg/hgrc
667 784 > [extensions]
668 785 > lfs =
669 786 > [lfs]
670 787 > threshold = 10
671 788 > EOF
672 789 $ echo '123456789012345' > lfs.bin
673 790 $ hg ci -Am 'lfs.bin'
674 791 adding lfs.bin
675 792 $ grep lfs .hg/requires
676 793 lfs
677 794 $ find .hg/store/lfs -type f
678 795 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
679 796
680 797 $ hg debugupgraderepo --run
681 798 upgrade will perform the following actions:
682 799
683 800 requirements
684 801 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
685 802
686 803 beginning upgrade...
687 804 repository locked and read-only
688 805 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
689 806 (it is safe to interrupt this process any time before data migration completes)
690 807 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
691 808 migrating 801 bytes in store; 467 bytes tracked data
692 809 migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
693 810 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
694 811 migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
695 812 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
696 813 migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
697 814 finished migrating 2 changelog revisions; change in size: 0 bytes
698 815 finished migrating 6 total revisions; total change in store size: 0 bytes
699 816 copying phaseroots
700 817 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
701 818 data fully migrated to temporary repository
702 819 marking source repository as being upgraded; clients will be unable to read from repository
703 820 starting in-place swap of repository data
704 821 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
705 822 replacing store...
706 823 store replacement complete; repository was inconsistent for *s (glob)
707 824 finalizing requirements file and making repository readable again
708 825 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
709 826 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
710 827 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
711 828
712 829 $ grep lfs .hg/requires
713 830 lfs
714 831 $ find .hg/store/lfs -type f
715 832 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
716 833 $ hg verify
717 834 checking changesets
718 835 checking manifests
719 836 crosschecking files in changesets and manifests
720 837 checking files
721 838 checked 2 changesets with 2 changes to 2 files
722 839 $ hg debugdata lfs.bin 0
723 840 version https://git-lfs.github.com/spec/v1
724 841 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
725 842 size 16
726 843 x-is-binary 0
727 844
728 845 $ cd ..
729 846
730 847 repository config is taken in account
731 848 -------------------------------------
732 849
733 850 $ cat << EOF >> $HGRCPATH
734 851 > [format]
735 852 > maxchainlen = 1
736 853 > EOF
737 854
738 855 $ hg init localconfig
739 856 $ cd localconfig
740 857 $ cat << EOF > file
741 858 > some content
742 859 > with some length
743 860 > to make sure we get a delta
744 861 > after changes
745 862 > very long
746 863 > very long
747 864 > very long
748 865 > very long
749 866 > very long
750 867 > very long
751 868 > very long
752 869 > very long
753 870 > very long
754 871 > very long
755 872 > very long
756 873 > EOF
757 874 $ hg -q commit -A -m A
758 875 $ echo "new line" >> file
759 876 $ hg -q commit -m B
760 877 $ echo "new line" >> file
761 878 $ hg -q commit -m C
762 879
763 880 $ cat << EOF >> .hg/hgrc
764 881 > [format]
765 882 > maxchainlen = 9001
766 883 > EOF
767 884 $ hg config format
768 885 format.maxchainlen=9001
769 886 $ hg debugdeltachain file
770 887 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
771 888 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
772 889 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
773 890 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
774 891
775 892 $ hg debugupgraderepo --run --optimize redeltaall
776 893 upgrade will perform the following actions:
777 894
778 895 requirements
779 896 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
780 897
781 898 re-delta-all
782 899 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
783 900
784 901 beginning upgrade...
785 902 repository locked and read-only
786 903 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
787 904 (it is safe to interrupt this process any time before data migration completes)
788 905 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
789 906 migrating 1019 bytes in store; 882 bytes tracked data
790 907 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
791 908 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
792 909 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
793 910 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
794 911 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
795 912 finished migrating 3 changelog revisions; change in size: 0 bytes
796 913 finished migrating 9 total revisions; total change in store size: -9 bytes
797 914 copying phaseroots
798 915 data fully migrated to temporary repository
799 916 marking source repository as being upgraded; clients will be unable to read from repository
800 917 starting in-place swap of repository data
801 918 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
802 919 replacing store...
803 920 store replacement complete; repository was inconsistent for *s (glob)
804 921 finalizing requirements file and making repository readable again
805 922 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
806 923 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
807 924 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
808 925 $ hg debugdeltachain file
809 926 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
810 927 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
811 928 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
812 929 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
813 930 $ cd ..
814 931
815 932 $ cat << EOF >> $HGRCPATH
816 933 > [format]
817 934 > maxchainlen = 9001
818 935 > EOF
819 936
820 937 Check upgrading a sparse-revlog repository
821 938 ---------------------------------------
822 939
823 940 $ hg init sparserevlogrepo --config format.sparse-revlog=no
824 941 $ cd sparserevlogrepo
825 942 $ touch foo
826 943 $ hg add foo
827 944 $ hg -q commit -m "foo"
828 945 $ cat .hg/requires
829 946 dotencode
830 947 fncache
831 948 generaldelta
832 949 revlogv1
833 950 store
834 951
835 952 Check that we can add the sparse-revlog format requirement
836 953 $ hg --config format.sparse-revlog=yes debugupgraderepo --run >/dev/null
837 954 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
838 955 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
839 956 $ cat .hg/requires
840 957 dotencode
841 958 fncache
842 959 generaldelta
843 960 revlogv1
844 961 sparserevlog
845 962 store
846 963
847 964 Check that we can remove the sparse-revlog format requirement
848 965 $ hg --config format.sparse-revlog=no debugupgraderepo --run >/dev/null
849 966 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
850 967 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
851 968 $ cat .hg/requires
852 969 dotencode
853 970 fncache
854 971 generaldelta
855 972 revlogv1
856 973 store
857 974
858 975 #if zstd
859 976
860 977 Check upgrading to a zstd revlog
861 978 --------------------------------
862 979
863 980 upgrade
864 981
865 982 $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup >/dev/null
866 983 $ hg debugformat -v
867 984 format-variant repo config default
868 985 fncache: yes yes yes
869 986 dotencode: yes yes yes
870 987 generaldelta: yes yes yes
871 988 sparserevlog: yes yes yes
872 989 plain-cl-delta: yes yes yes
873 990 compression: zstd zlib zlib
874 991 compression-level: default default default
875 992 $ cat .hg/requires
876 993 dotencode
877 994 fncache
878 995 generaldelta
879 996 revlog-compression-zstd
880 997 revlogv1
881 998 sparserevlog
882 999 store
883 1000
884 1001 downgrade
885 1002
886 1003 $ hg debugupgraderepo --run --no-backup > /dev/null
887 1004 $ hg debugformat -v
888 1005 format-variant repo config default
889 1006 fncache: yes yes yes
890 1007 dotencode: yes yes yes
891 1008 generaldelta: yes yes yes
892 1009 sparserevlog: yes yes yes
893 1010 plain-cl-delta: yes yes yes
894 1011 compression: zlib zlib zlib
895 1012 compression-level: default default default
896 1013 $ cat .hg/requires
897 1014 dotencode
898 1015 fncache
899 1016 generaldelta
900 1017 revlogv1
901 1018 sparserevlog
902 1019 store
903 1020
904 1021 upgrade from hgrc
905 1022
906 1023 $ cat >> .hg/hgrc << EOF
907 1024 > [format]
908 1025 > revlog-compression=zstd
909 1026 > EOF
910 1027 $ hg debugupgraderepo --run --no-backup > /dev/null
911 1028 $ hg debugformat -v
912 1029 format-variant repo config default
913 1030 fncache: yes yes yes
914 1031 dotencode: yes yes yes
915 1032 generaldelta: yes yes yes
916 1033 sparserevlog: yes yes yes
917 1034 plain-cl-delta: yes yes yes
918 1035 compression: zstd zstd zlib
919 1036 compression-level: default default default
920 1037 $ cat .hg/requires
921 1038 dotencode
922 1039 fncache
923 1040 generaldelta
924 1041 revlog-compression-zstd
925 1042 revlogv1
926 1043 sparserevlog
927 1044 store
928 1045
929 1046 $ cd ..
930 1047
931 1048 #endif
General Comments 0
You need to be logged in to leave comments. Login now