##// END OF EJS Templates
debugupgraderepo: add a --no-backup mode...
Boris Feld -
r41121:a59a7472 default
parent child Browse files
Show More
@@ -1,3391 +1,3393 b''
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import codecs
11 11 import collections
12 12 import difflib
13 13 import errno
14 14 import operator
15 15 import os
16 16 import random
17 17 import re
18 18 import socket
19 19 import ssl
20 20 import stat
21 21 import string
22 22 import subprocess
23 23 import sys
24 24 import time
25 25
26 26 from .i18n import _
27 27 from .node import (
28 28 bin,
29 29 hex,
30 30 nullhex,
31 31 nullid,
32 32 nullrev,
33 33 short,
34 34 )
35 35 from . import (
36 36 bundle2,
37 37 changegroup,
38 38 cmdutil,
39 39 color,
40 40 context,
41 41 dagparser,
42 42 encoding,
43 43 error,
44 44 exchange,
45 45 extensions,
46 46 filemerge,
47 47 filesetlang,
48 48 formatter,
49 49 hg,
50 50 httppeer,
51 51 localrepo,
52 52 lock as lockmod,
53 53 logcmdutil,
54 54 merge as mergemod,
55 55 obsolete,
56 56 obsutil,
57 57 phases,
58 58 policy,
59 59 pvec,
60 60 pycompat,
61 61 registrar,
62 62 repair,
63 63 revlog,
64 64 revset,
65 65 revsetlang,
66 66 scmutil,
67 67 setdiscovery,
68 68 simplemerge,
69 69 sshpeer,
70 70 sslutil,
71 71 streamclone,
72 72 templater,
73 73 treediscovery,
74 74 upgrade,
75 75 url as urlmod,
76 76 util,
77 77 vfs as vfsmod,
78 78 wireprotoframing,
79 79 wireprotoserver,
80 80 wireprotov2peer,
81 81 )
82 82 from .utils import (
83 83 cborutil,
84 84 dateutil,
85 85 procutil,
86 86 stringutil,
87 87 )
88 88
89 89 from .revlogutils import (
90 90 deltas as deltautil
91 91 )
92 92
93 93 release = lockmod.release
94 94
95 95 command = registrar.command()
96 96
@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    nargs = len(args)
    if nargs == 3:
        # an explicit index file was given: open it as a standalone revlog
        indexfile, rev1, rev2 = args
        rl = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                           indexfile)
        lookup = rl.lookup
    elif nargs == 2:
        # no index file: fall back to the changelog of the local repository
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        rl = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    ancestor = rl.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (rl.rev(ancestor), hex(ancestor)))
115 115
@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    # open the bundle file, let exchange sniff its type, then apply in place
    fh = hg.openpath(ui, fname)
    bundle = exchange.readbundle(ui, fh, fname)
    bundle.apply(repo)
122 122
@command('debugbuilddag',
         [('m', 'mergeable-file', None, _('add single file mergeable changes')),
          ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
          ('n', 'new-file', None, _('add new file at each rev'))],
         _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
          otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    # refuse to run on a non-empty repo: revision numbers in the DAG text
    # are only meaningful when we start from rev 0
    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # determine number of revs in DAG (first parse pass, for progress total)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = ['%d' % i
                              for i in pycompat.xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []
    progress = ui.makeprogress(_('building'), unit=_('revisions'),
                               total=total)
    # all commits are created under wlock/lock inside a single transaction
    with progress, repo.wlock(), repo.lock(), repo.transaction("builddag"):
        at = -1                 # rev number of the last node committed
        atbranch = 'default'    # branch for subsequent commits ('@branch')
        nodeids = []            # nodeids[rev] -> binary node, for backrefs
        id = 0
        progress.update(id)
        # second parse pass: actually build the commits
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # merge: three-way merge the shared file's contents
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    # mark "this rev touched line id*linesperrev"
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = "of"
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id

                if new_file:
                    fn = "nf%i" % id
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id
                    if len(ps) > 1:
                        # carry all "nf*" files of the second parent into
                        # the merge so they do not get dropped
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(repo, cx, path,
                                                  filecontent[path])
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                # ':tag' element: record a local tag for node 'id'
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                # '@branch' element: switch branch for subsequent commits
                ui.note(('branch %s\n' % data))
                atbranch = data
            progress.update(id)

        if tags:
            repo.vfs.write("localtags", "".join(tags))
270 270
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """dump the deltas contained in the changegroup 'gen'

    With 'all', every section (changelog, manifest, each filelog) is
    dumped with full delta metadata; otherwise only the node of each
    changelog delta is printed.  'indent' prefixes every output line
    (used when the changegroup is nested inside a bundle2 part).
    """
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            # consumes 'gen' up to the next section boundary; must be
            # called in stream order (changelog, manifest, filelogs)
            ui.write("\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write("%s%s %s %s %s %s %d\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))

        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        # filelogheader() returns {} at end of stream, terminating iter()
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        # header must be consumed before deltaiter() can run
        chunkdata = gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write("%s%s\n" % (indent_string, hex(node)))
299 299
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display the version and obsolescence markers carried by 'part'

    'part' is a bundle2 part whose payload is a binary obsmarkers blob.
    Each decoded marker is rendered through the 'debugobsolete'
    formatter, prefixed with 'indent' spaces.
    """
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = ' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # report but do not abort: other parts of the bundle may still
        # be displayable
        msg = "%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = "%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        fm = ui.formatter('debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()
322 322
def _debugphaseheads(ui, data, indent=0):
    """decode a binary phase-heads blob and print one '<node> <phase>'
    line per head, each prefixed with 'indent' spaces"""
    prefix = ' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        phasename = phases.phasenames[phase]
        for head in headsbyphase[phase]:
            ui.write(prefix)
            ui.write('%s %s\n' % (hex(head), phasename))
331 331
def _quasirepr(thing):
    """repr-like rendering of 'thing' with deterministic dict ordering"""
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        pairs = (b'%s: %s' % (key, thing[key]) for key in sorted(thing))
        return '{%s}' % b', '.join(pairs)
    return pycompat.bytestr(repr(thing))
337 337
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
    wanted = opts.get(r'part_type', [])
    for part in gen.iterparts():
        # honor --part-type filtering when any types were requested
        if wanted and part.type not in wanted:
            continue
        ui.write(('%s -- %s (mandatory: %r)\n'
                  % (part.type, _quasirepr(part.params), part.mandatory)))
        if part.type == 'changegroup':
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        elif part.type == 'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        elif part.type == 'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)
360 360
@command('debugbundle',
         [('a', 'all', None, _('show all details')),
          ('', 'part-type', [], _('show only the named part type')),
          ('', 'spec', None, _('print the bundlespec of the bundle'))],
         _('FILE'),
         norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as fh:
        # --spec only sniffs the bundle type; no content gets listed
        if spec:
            ui.write('%s\n' % exchange.getbundlespec(ui, fh))
            return

        bundle = exchange.readbundle(ui, fh, bundlepath)
        if isinstance(bundle, bundle2.unbundle20):
            return _debugbundle2(ui, bundle, all=all, **opts)
        _debugchangegroup(ui, bundle, all=all, **opts)
379 379
@command('debugcapabilities',
         [], _('PATH'),
         norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    caps = peer.capabilities()
    ui.write(('Main capabilities:\n'))
    for cap in sorted(caps):
        ui.write((' %s\n') % cap)
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.write(('Bundle2 capabilities:\n'))
        for capname, vals in sorted(b2caps.iteritems()):
            ui.write((' %s\n') % capname)
            for val in vals:
                ui.write((' %s\n') % val)
398 398
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate

    Cross-checks every dirstate entry against the manifests of the two
    dirstate parents, warns about each inconsistency, and aborts if any
    were found.
    """
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        # 'n'ormal and 'r'emoved entries must exist in the first manifest
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        # 'a'dded entries must not already be tracked
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        # 'm'erged entries must come from at least one parent
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # NOTE: previously the message was bound to a local named 'error',
        # shadowing the 'error' module and crashing with AttributeError on
        # 'error.Abort'; use a non-shadowing name instead.
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
426 426
@command('debugcolor',
         [('', 'style', None, _('show all configured styles'))],
         'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.write(('color mode: %s\n') % stringutil.pprint(ui._colormode))
    # --style lists configured labels; the default lists raw color names
    if opts.get(r'style'):
        return _debugdisplaystyle(ui)
    return _debugdisplaycolor(ui)
437 437
def _debugdisplaycolor(ui):
    """print every color/effect name known to the active color mode"""
    # operate on a copy so the caller's ui keeps its configured styles
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for key, value in ui.configitems('color'):
            if key.startswith('color.'):
                ui._styles[key] = key[6:]
            elif key.startswith('terminfo.'):
                ui._styles[key] = key[9:]
    ui.write(_('available colors:\n'))
    # names containing '_' sort after the others so that '_background'
    # variants are grouped together
    def sortkey(item):
        return ('_' in item[0], item[0], item[1])
    for colorname, label in sorted(ui._styles.items(), key=sortkey):
        ui.write(('%s\n') % colorname, label=label)
455 455
def _debugdisplaystyle(ui):
    """print each configured style label with its rendered effects"""
    ui.write(_('available style:\n'))
    if not ui._styles:
        return
    # pad so the effect lists line up under the longest label
    width = max(len(name) for name in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            ui.write(': ')
            ui.write(' ' * (max(0, width - len(label))))
            rendered = [ui.label(e, e) for e in effects.split()]
            ui.write(', '.join(rendered))
        ui.write('\n')
469 469
@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))

    requirements, chunks = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, chunks, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
487 487
@command('debugdag',
         [('t', 'tags', None, _('use tags as labels')),
          ('b', 'branches', None, _('annotate with branch names')),
          ('', 'dots', None, _('use dots for runs')),
          ('s', 'spaces', None, _('separate elements by spaces'))],
         _('[OPTION]... [FILE [REV]...]'),
         optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get(r'spaces')
    dots = opts.get(r'dots')
    if file_:
        # standalone index file: emit its DAG, labeling the listed revs
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            # yields ('n', (rev, parents)) for nodes and
            # ('l', (rev, label)) for requested rev labels
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        cl = repo.changelog
        tags = opts.get(r'tags')
        branches = opts.get(r'branches')
        if tags:
            # map rev -> list of tag names, so labels can be emitted inline
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            # additionally yields ('a', branchname) whenever the branch
            # changes (with --branches)
            b = "default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
    ui.write("\n")
550 550
@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    usingflag = (opts.get('changelog') or opts.get('manifest')
                 or opts.get('dir'))
    if usingflag:
        # with -c/-m/--dir the single positional argument is the revision
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    store = cmdutil.openstorage(repo, 'debugdata', file_, opts)
    try:
        ui.write(store.revision(store.lookup(rev), raw=True))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)
566 566
@command('debugdate',
         [('e', 'extended', None, _('try extended date formats'))],
         _('[-e] DATE [RANGE]'),
         norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts[r"extended"]:
        parsed = dateutil.parsedate(date, util.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    ui.write(("internal: %d %d\n") % parsed)
    ui.write(("standard: %s\n") % dateutil.datestr(parsed))
    if range:
        # also report whether the parsed date falls inside RANGE
        matchfn = dateutil.matchdate(range)
        ui.write(("match: %s\n") % matchfn(parsed[0]))
582 582
@command('debugdeltachain',
         cmdutil.debugrevlogopts + cmdutil.formatteropts,
         _('-c|-m|FILE'),
         optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
                   (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        # return (compsize, uncompsize, deltatype, chain, chainsize)
        # for one revision, walking its full delta chain
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # e[3] is the delta base; classify it relative to the parents
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            # without generaldelta the base is always the previous rev
            # unless the revision is stored as a full snapshot
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    fm.plain(' rev chain# chainlen prev delta '
             'size rawsize chainsize ratio lindist extradist '
             'extraratio')
    if withsparseread:
        fm.plain(' readsize largestblk rddensity srchunks')
    fm.plain('\n')

    # chains sharing a base get the same id, numbered in discovery order
    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            # simulate the sparse read: slice the chain into the hunks
            # that would actually be fetched from disk
            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write('readsize largestblock readdensity srchunks',
                     ' %10d %10d %9.5f %8d',
                     readsize, largestblock, readdensity, srchunks,
                     readsize=readsize, largestblock=largestblock,
                     readdensity=readdensity, srchunks=srchunks)

        fm.plain('\n')

    fm.end()
734 734
@command('debugdirstate|debugstate',
         [('', 'nodates', None, _('do not display the saved mtime (DEPRECATED)')),
          ('', 'dates', True, _('display the saved mtime')),
          ('', 'datesort', None, _('sort by saved mtime'))],
         _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    # --nodates is the deprecated spelling of --no-dates
    nodates = not opts[r'dates']
    if opts.get(r'nodates') is not None:
        nodates = True
    datesort = opts.get(r'datesort')

    timestr = ""
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    # dirstate map entries are (state, mode, size, mtime) tuples
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            # mtime of -1 means "unknown/needs lookup"
            timestr = 'unset '
        elif nodates:
            timestr = 'set '
        else:
            timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
            timestr = encoding.strtolocal(timestr)
        if ent[1] & 0o20000:
            # symlink bit set in the recorded mode
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
769 769
@command('debugdiscovery',
         [('', 'old', None, _('use old-style discovery')),
          ('', 'nonheads', None,
           _('use old-style discovery with non-heads included')),
          ('', 'rev', [], 'restrict discovery to this set of revs'),
         ] + cmdutil.remoteopts,
         _('[--rev REV] [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(pushedrevs, remoteheads, remote=remote):
        # run one discovery round and report common/local/remote heads
        if opts.get('old'):
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))

            # reduce the common set to its heads, as the new protocol does
            clnode = repo.changelog.node
            common = repo.revs('heads(::%ln)', common)
            common = {clnode(r) for r in common}
        else:
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote,
                                                            ancestorsof=nodes)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts['rev']
    doit(localrevs, remoterevs)
822 822
# buffer size (4 KiB) used by debugdownload for reads and buffered writes
_chunksize = 4 << 10
824 824
@command('debugdownload',
         [
             ('o', 'output', '', _('path')),
         ],
         optionalrepo=True)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config

    Streams the resource at 'url' to the file named by --output, or to
    the ui when no output path is given.
    """
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        dest = open(output, "wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        if output:
            dest.close()
        # previously the response handle was leaked; release it too
        # (assumes the object returned by urlmod.open has close() —
        # NOTE(review): confirm for all url scheme handlers)
        fh.close()
846 846
@command('debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = [] # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            # annotate the name with a compatibility hint for this hg version
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        fm.condwrite(ui.verbose and extsource, 'source',
                     _(' location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal])
        # always record the flag for template output, even when not shown
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _(' tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _(' bug reporting: %s\n'), extbuglink or "")

    fm.end()
892 892
@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')),
     ('', 'all-files', False,
      _('test files from all revisions and working directory')),
     ('s', 'show-matcher', None,
      _('print internal representation of matcher')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME'))],
    _('[-r REV] [--all-files] [OPTION]... FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset
    fileset.symbols # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    # Translation pipeline for the fileset expression; each stage transforms
    # the tree produced by the previous one.
    stages = [
        ('parsed', pycompat.identity),
        ('analyzed', filesetlang.analyze),
        ('optimized', filesetlang.optimize),
    ]
    stagenames = set(n for n, f in stages)

    # Decide which intermediate trees should be printed.
    showalways = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    # Run the pipeline, printing requested intermediate representations.
    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts['show_stage'] or n != 'parsed':
                # label the tree only when stages were explicitly requested
                # (or for non-parsed stages)
                ui.write(("* %s:\n") % n)
            ui.write(filesetlang.prettyformat(tree), "\n")

    # Build the candidate set of file names the matcher is applied to.
    files = set()
    if opts['all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts['all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(repo.dirstate.walk(scmutil.matchall(repo),
                                        subrepos=list(wctx.substate),
                                        unknown=True, ignored=True))
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(expr)
    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write("%s\n" % f)
959 959
@command('debugformat',
         [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    # widest variant name, used to align the table columns
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len('format-variant'), maxvariantlength)

    def makeformatname(name):
        # '%s:' padded so the value columns line up for every variant
        return '%s:' + (' ' * (maxvariantlength - len(name)))

    fm = ui.formatter('debugformat', opts)
    if fm.isplain():
        def formatvalue(value):
            # strings pass through unchanged; booleans render as yes/no
            if util.safehasattr(value, 'startswith'):
                return value
            if value:
                return 'yes'
            else:
                return 'no'
    else:
        formatvalue = pycompat.identity

    # table header
    fm.plain('format-variant')
    fm.plain(' ' * (maxvariantlength - len('format-variant')))
    fm.plain(' repo')
    if ui.verbose:
        fm.plain(' config default')
    fm.plain('\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # pick labels highlighting disagreement between the repo's actual
        # format, the configured value and the Mercurial default
        if repovalue != configvalue:
            namelabel = 'formatvariant.name.mismatchconfig'
            repolabel = 'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = 'formatvariant.name.mismatchdefault'
            repolabel = 'formatvariant.repo.mismatchdefault'
        else:
            namelabel = 'formatvariant.name.uptodate'
            repolabel = 'formatvariant.repo.uptodate'

        fm.write('name', makeformatname(fv.name), fv.name,
                 label=namelabel)
        fm.write('repo', ' %3s', formatvalue(repovalue),
                 label=repolabel)
        if fv.default != configvalue:
            configlabel = 'formatvariant.config.special'
        else:
            configlabel = 'formatvariant.config.default'
        fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
                     label=configlabel)
        fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
                     label='formatvariant.default')
        fm.plain('\n')
    fm.end()
1021 1021
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    # Probe filesystem capabilities at "path" and report one per line.
    def yesno(flag):
        return flag and 'yes' or 'no'

    ui.write(('path: %s\n') % path)
    mountpoint = util.getfsmountpoint(path) or '(unknown)'
    ui.write(('mounted on: %s\n') % mountpoint)
    ui.write(('exec: %s\n') % yesno(util.checkexec(path)))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % yesno(util.checklink(path)))
    ui.write(('hardlink: %s\n') % yesno(util.checknlink(path)))
    # Case sensitivity can only be probed by creating a real file; if that
    # fails (e.g. unwritable location) we report '(unknown)'.
    casesensitive = '(unknown)'
    try:
        with pycompat.namedtempfile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = yesno(util.fscasesensitive(f.name))
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)
1038 1038
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
     ('C', 'common', [], _('id of common node'), _('ID')),
     ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    # Assemble keyword arguments for the wire-protocol getbundle call.
    kwargs = {}
    if common:
        kwargs[r'common'] = [bin(s) for s in common]
    if head:
        kwargs[r'heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    kwargs[r'bundlecaps'] = None
    bundle = peer.getbundle('debug', **kwargs)

    # Map the user-facing compression name to the on-disk bundle type.
    typemap = {
        'none': 'HG10UN',
        'bzip2': 'HG10BZ',
        'gzip': 'HG10GZ',
        'bundle2': 'HG20',
    }
    bundletype = typemap.get(opts.get('type', 'bzip2').lower())
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1073 1073
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != '.':
                # a file is ignored either directly or because one of its
                # parent directories matches an ignore rule
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % m.uipath(f))
                else:
                    ui.write(_("%s is ignored because of "
                               "containing folder %s\n")
                             % (m.uipath(f), ignored))
                # report the exact rule (file + line) that matched
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % m.uipath(f))
1115 1115
@command('debugindex', cmdutil.debugrevlogopts + cmdutil.formatteropts,
         _('-c|-m|FILE'))
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindex', file_, opts)

    # full hashes with --debug, short forms otherwise
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # width of a node-id column: probe the first revision; 12 is the
    # fallback when the store is empty
    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break

    fm = ui.formatter('debugindex', opts)
    fm.plain(b' rev linkrev %s %s p2\n' % (
        b'nodeid'.ljust(idlen),
        b'p1'.ljust(idlen)))

    # one row per revision: rev, linkrev, node and both parents
    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', '%7d ', store.linkrev(rev))
        fm.write(b'node', '%s ', shortfn(node))
        fm.write(b'p1', '%s ', shortfn(parents[0]))
        fm.write(b'p2', '%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()
1151 1151
@command('debugindexdot', cmdutil.debugrevlogopts,
         _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindexdot', file_, opts)
    # Emit one graphviz edge per parent link; a null second parent is
    # omitted from the graph.
    ui.write(("digraph G {\n"))
    for rev in store:
        pp = store.parents(store.node(rev))
        ui.write("\t%d -> %d\n" % (store.rev(pp[0]), rev))
        if pp[1] != nullid:
            ui.write("\t%d -> %d\n" % (store.rev(pp[1]), rev))
    ui.write("}\n")
1166 1166
@command('debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # exercise the index before reading stats — presumably forces it to be
    # fully parsed; TODO confirm
    repo.changelog.shortest(nullid, 1)
    index = repo.changelog.index
    # only the native (C) index implementation exposes stats()
    if not util.safehasattr(index, 'stats'):
        raise error.Abort(_('debugindexstats only works with native code'))
    stats = index.stats()
    for name in sorted(stats):
        ui.write('%s: %d\n' % (name, stats[name]))
1176 1176
@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    def writetemp(contents):
        # write "contents" to a new temp file and return its path; the
        # caller is responsible for removing it
        (fd, name) = pycompat.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, r"wb")
        f.write(contents)
        f.close()
        return name

    # number of hard failures found so far; returned at the end
    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable)
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    # TLS/SNI support of the Python build
    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_(' TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_(' SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    # when C extensions are allowed, verify they actually import
    if policy.policy in ('c', 'allow'):
        err = None
        try:
            from .cext import (
                base85,
                bdiff,
                mpatch,
                osutil,
            )
            dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))
    re2 = 'missing'
    if util._re2:
        re2 = 'available'
    fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                # p is reused below as "templates are healthy" flag
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    # 'vi' is the implicit default, so a missing 'vi' gets a different
    # message than a missing explicitly-configured editor
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editorbin)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editorbin)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = stringutil.forcebytestr(e)
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems
1350 1350
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('known'):
        raise error.Abort("known() not supported by target repository")
    nodes = [bin(s) for s in ids]
    # one digit per queried node: "1" = known, "0" = unknown
    bits = ["1" if known else "0" for known in peer.known(nodes)]
    ui.write("%s\n" % "".join(bits))
1364 1364
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # kept only as an alias; forwards directly to debugnamecomplete
    debugnamecomplete(ui, repo, *args)
1369 1369
@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)')),
          ('s', 'set-lock', None, _('set the store lock until stopped')),
          ('S', 'set-wlock', None,
           _('set the working state lock until stopped'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # force mode: remove the lock files outright, then stop
    if opts.get(r'force_lock'):
        repo.svfs.unlink('lock')
    if opts.get(r'force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
        return 0

    # set mode: acquire the requested lock(s) non-blockingly and hold them
    # until the user answers the prompt
    locks = []
    try:
        if opts.get(r'set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_('wlock is already held'))
        if opts.get(r'set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_('lock is already held'))
        if len(locks):
            ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    # show mode: report the state of both locks
    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # we acquired it, so it was free; release immediately
            l.release()
        else:
            # somebody else holds the lock: report owner, pid, host and age
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = 'user %s, process %s, host %s' \
                                 % (user or b'None', pid, host)
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held
1466 1466
@command('debugmanifestfulltextcache', [
    ('', 'clear', False, _('clear the cache')),
    ('a', 'add', '', _('add the given manifest node to the cache'),
     _('NODE'))
    ], '')
def debugmanifestfulltextcache(ui, repo, add=None, **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""
    # hold the lock for the whole operation: we may mutate the cache and we
    # read its on-disk size at the end
    with repo.lock():
        r = repo.manifestlog.getstorage(b'')
        try:
            cache = r._fulltextcache
        except AttributeError:
            ui.warn(_(
                "Current revlog implementation doesn't appear to have a "
                'manifest fulltext cache\n'))
            return

        if opts.get(r'clear'):
            cache.clear()

        if add:
            try:
                manifest = repo.manifestlog[r.lookup(add)]
            except error.LookupError as e:
                raise error.Abort(e, hint="Check your manifest node id")
            manifest.read() # stores revision in cache too

        if not len(cache):
            # terminate the message with a newline so it doesn't run into
            # subsequent output (every other message here ends with '\n')
            ui.write(_('Cache empty\n'))
        else:
            ui.write(
                _('Cache contains %d manifest entries, in order of most to '
                  'least recent:\n') % (len(cache),))
            totalsize = 0
            for nodeid in cache:
                # Use cache.get to not update the LRU order
                data = cache.get(nodeid)
                size = len(data)
                totalsize += size + 24 # 20 bytes nodeid, 4 bytes size
                ui.write(_('id: %s, size %s\n') % (
                    hex(nodeid), util.bytecount(size)))
            ondisk = cache._opener.stat('manifestfulltextcache').st_size
            ui.write(
                _('Total cache data size %s, on-disk %s\n') % (
                    util.bytecount(totalsize), util.bytecount(ondisk))
            )
1513 1513
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        # render the null hash as the literal string 'null'
        if h == nullhex:
            return 'null'
        else:
            return h

    def printrecords(version):
        # dump the requested version's records (v1records/v2records are
        # taken from the enclosing scope)
        ui.write(('* version %d records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                # merge driver record: driver name + its state
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                # per-file record: NUL-separated fields; only v2 stores the
                # "other" node, so v1 has flags at index 7 instead
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write((' local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write((' ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write((' other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                # per-file extras: filename followed by key/value pairs
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                # conflict-marker labels: local, other and optionally base
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write((' local: %s\n' % labels[0]))
                ui.write((' other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write((' base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        # known record types first, in 'LOml' order; others by payload
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
        if ui.verbose:
            printrecords(2)
1612 1612
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    # Collect names from every registered namespace except branches;
    # branches are handled separately below so that only open branches are
    # offered (matching the historical behavior of this command).
    candidates = set()
    for nsname, ns in repo.names.iteritems():
        if nsname != 'branches':
            candidates.update(ns.listnames(repo))
    for tag, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            candidates.add(tag)

    # With no arguments, the empty prefix matches everything.
    prefixes = args or ['']
    matches = set()
    for prefix in prefixes:
        matches.update(n for n in candidates if n.startswith(prefix))
    ui.write('\n'.join(sorted(matches)))
    ui.write('\n')
1632 1632
@command('debugobsolete',
        [('', 'flags', 0, _('markers flag')),
         ('', 'record-parents', False,
          _('record parent information for the precursor')),
         ('r', 'rev', [], _('display markers relevant to REV')),
         ('', 'exclusive', False, _('restrict display to markers only '
                                    'relevant to REV')),
         ('', 'index', False, _('display index of the marker')),
         ('', 'delete', [], _('delete markers specified by indices')),
        ] + cmdutil.commitopts2 + cmdutil.formatteropts,
        _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        # convert a full hex node id to binary; abort on anything else
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    # deletion mode: remove the markers at the given indices and return
    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        # creation mode: record one marker for precursor -> successors
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = encoding.fromlocal(opts['user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot used --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata, ui=ui)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') %
                                  pycompat.bytestr(exc))
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # display mode: list markers, optionally restricted to --rev
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsutil.getmarkers(repo, nodes=nodes,
                                              exclusive=opts['exclusive']))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            # indices must be computed over all markers, so iterate the full
            # set but only display the relevant subset
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
1749 1749
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        # Return (files, dirs) completion candidates for ``path``,
        # limited to dirstate entries whose status byte is in
        # ``acceptable``.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            # The spec points outside this repository: nothing to offer.
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        # Make the spec root-relative; the dirstate stores '/'-separated
        # root-relative paths, so convert OS separators when they differ.
        spec = spec[len(rootdir):]
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts[r'full']
        files, dirs = set(), set()
        # Bind the bound methods once; this loop runs per dirstate entry.
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                # Without --full, only complete up to the next path
                # separator after the typed prefix.
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # Build the acceptable dirstate status characters from the filter
    # options; an empty string falls back to 'nmar' (everything) below.
    acceptable = ''
    if opts[r'normal']:
        acceptable += 'nm'
    if opts[r'added']:
        acceptable += 'a'
    if opts[r'removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    # Directories are reported alongside files, one completion per line.
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')
1814 1814
@command('debugpeer', [], _('PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Peer request logging is unconditionally enabled here; the log
    # lines only become visible when --debug is in effect.
    logoverride = {
        ('devel', 'debug.peer-request'): True,
    }

    def yesno(flag):
        return _('yes') if flag else _('no')

    with ui.configoverride(logoverride):
        peer = hg.peer(ui, {}, path)

        islocal = peer.local() is not None
        pushable = peer.canpush()

        ui.write(_('url: %s\n') % peer.url())
        ui.write(_('local: %s\n') % yesno(islocal))
        ui.write(_('pushable: %s\n') % yesno(pushable))
1833 1833
@command('debugpickmergetool',
         [('r', 'rev', '', _('check for files in this revision'), _('REV')),
          ('', 'changedelete', None, _('emulate merging change and delete')),
         ] + cmdutil.walkopts + cmdutil.mergetoolopts,
         _('[PATTERN]...'),
         inferrepo=True)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts['tool']:
        # --tool wins over every other configuration source (item 1 in
        # the precedence list above), modeled as a ui.forcemerge override.
        overrides[('ui', 'forcemerge')] = opts['tool']
        ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))

    with ui.configoverride(overrides, 'debugmergepatterns'):
        # Report the higher-precedence sources (items 1-2 and 4) up front
        # when -v/--verbose is active, as documented above.
        hgmerge = encoding.environ.get("HGMERGE")
        if hgmerge is not None:
            ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config("ui", "merge")
        if uimerge:
            ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get('rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts['changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                if not ui.debugflag:
                    # Capture (and discard) the tool-selection chatter
                    # unless --debug asked for it.
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(repo, ui, path,
                                                     fctx.isbinary(),
                                                     'l' in fctx.flags(),
                                                     changedelete)
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(('%s = %s\n') % (path, tool))
1912 1912
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if not keyinfo:
        # Listing mode: dump every key/value pair in the namespace.
        for k, v in sorted(target.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (stringutil.escapestr(k),
                                   stringutil.escapestr(v)))
        return

    # Update mode: conditionally replace old value with new one.
    key, old, new = keyinfo
    with target.commandexecutor() as executor:
        result = executor.callcommand('pushkey', {
            'namespace': namespace,
            'key': key,
            'old': old,
            'new': new,
        }).result()

    ui.status(pycompat.bytestr(result) + '\n')
    return not result
1940 1940
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    """compare the parent vectors (pvecs) of two revisions

    Prints both vectors, their depths, and the delta/hamming
    distance/relation between them. If B is omitted it is resolved by
    scmutil.revsingle's default lookup.
    """
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    else:
        # Defensive fallback: previously, if none of the comparisons
        # above held, 'rel' was left unbound and the summary line below
        # crashed with a NameError.
        rel = "?"
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))
1961 1961
@command('debugrebuilddirstate|debugrebuildstate',
         [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
          ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                                  'the working copy parent')),
         ],
         _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        ds = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get(r'minimal'):
            inmanifest = set(ctx.manifest().keys())
            indirstate = set(ds)
            # Files the manifest knows about but the dirstate does not.
            manifestonly = inmanifest - indirstate
            # Dirstate-only files, excluding the ones marked as added.
            dsnotadded = {f for f in indirstate - inmanifest
                          if ds[f] != 'a'}
            changedfiles = manifestonly | dsnotadded

        ds.rebuild(ctx.node(), ctx.manifest(), changedfiles)
1999 1999
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # Thin command wrapper: all the work happens in
    # repair.rebuildfncache.
    repair.rebuildfncache(ui, repo)
2004 2004
@command('debugrename',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, (file1,) + pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        # renamed() yields (source path, source filenode) or a false value.
        renamed = fctx.filelog().renamed(fctx.filenode())
        relpath = matcher.rel(path)
        if not renamed:
            ui.write(_("%s not renamed\n") % relpath)
        else:
            ui.write(_("%s renamed from %s:%s\n")
                     % (relpath, renamed[0], hex(renamed[1])))
2022 2022
@command('debugrevlog', cmdutil.debugrevlogopts +
    [('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    if opts.get("dump"):
        # --dump mode: one raw line per revision, no aggregate statistics.
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
                 " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # -1 means "no delta parent": the revision is its own base.
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            # Running total of raw sizes, used for the compression column.
            ts = ts + rs
            # Track current heads: parents stop being heads once seen.
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    # Decode the revlog version/flag word (low 16 bits = format version).
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append('inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks ways the "delta" are build
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about delta chain of each revs
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision
    # Each size accumulator is a 3-element list: [min, max, total];
    # the total slot is later overwritten with the average.
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        # Fold ``size`` into the [min, max, total] accumulator ``l``.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # No delta parent: this is stored as a full snapshot (or is
            # an empty text).
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            # Delta: chain length/base/span extend the delta parent's.
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                # Intermediate snapshot: a delta against another snapshot.
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                # Classify which revision the delta was computed against.
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, '_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            # First byte identifies the compression engine of the chunk.
            chunktype = bytes(segment[0:1])
        else:
            chunktype = 'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # From here on the [2] slots become averages instead of totals.
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    # Format-string builders: width adapts to the largest value printed.
    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        # Return (value, percentage-of-total) for the pcfmtstr templates.
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.write(('format : %d\n') % format)
    ui.write(('flags : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' empty : ') + fmt % pcfmt(numempty, numrevs))
    ui.write((' text : ')
             + fmt % pcfmt(numemptytext, numemptytext + numemptydelta))
    ui.write((' delta : ')
             + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta))
    ui.write((' snapshot : ') + fmt % pcfmt(numfull + numsemi, numrevs))
    for depth in sorted(numsnapdepth):
        ui.write((' lvl-%-3d : ' % depth)
                 + fmt % pcfmt(numsnapdepth[depth], numrevs))
    ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write((' snapshot : ')
             + fmt % pcfmt(fulltotal + semitotal, totalsize))
    for depth in sorted(numsnapdepth):
        ui.write((' lvl-%-3d : ' % depth)
                 + fmt % pcfmt(snaptotal[depth], totalsize))
    ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        # Render the one-byte compression marker in a readable way.
        if chunktype == 'empty':
            return ' %s : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return ' 0x%s : ' % hex(chunktype)

    ui.write('\n')
    ui.write(('chunks : ') + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.write(('chunks size : ') + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.write(('avg chain length : ') + fmt % avgchainlen)
    ui.write(('max chain length : ') + fmt % maxchainlen)
    ui.write(('max chain reach : ') + fmt % maxchainspan)
    ui.write(('compression ratio : ') + fmt % compratio)

    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('inter-snapshot size (min/max/avg) : %d / %d / %d\n')
                 % tuple(semisize))
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.write((' level-%-3d (min/max/avg) : %d / %d / %d\n')
                     % ((depth,) + tuple(snapsizedepth[depth])))
        ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
                                                            numprev))
            ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
                                                            numprev))
            ui.write((' other : ') + fmt2 % pcfmt(numoprev,
                                                  numprev))
        if gdelta:
            ui.write(('deltas against p1 : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2 : ')
                     + fmt % pcfmt(nump2, numdeltas))
            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                               numdeltas))
2322 2322
@command('debugrevlogindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlogindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    # Full-length node hashes under --debug, short form otherwise.
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        # Measure the first node's rendered width for column alignment.
        idlen = len(shortfn(r.node(i)))
        break

    # Header line; layout depends on the format and on -v/--verbose.
    if format == 0:
        if ui.verbose:
            ui.write((" rev offset length linkrev"
                     " %s %s p2\n") % ("nodeid".ljust(idlen),
                                       "p1".ljust(idlen)))
        else:
            ui.write((" rev linkrev %s %s p2\n") % (
                "nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        if ui.verbose:
            ui.write((" rev flag offset length size link p1"
                      " p2 %s\n") % "nodeid".rjust(idlen))
        else:
            ui.write((" rev flag size link p1 p2 %s\n") %
                     "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # Fall back to null parents if lookup fails, so a
                # damaged entry doesn't abort the whole dump.
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
            else:
                ui.write("% 6d % 7d %s %s %s\n" % (
                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
                    shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    r.linkrev(i), pr[0], pr[1], shortfn(node)))
            else:
                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
                    shortfn(node)))
2387 2387
@command('debugrevspec',
    [('', 'optimize', None,
      _('print parsed tree after optimizing (DEPRECATED)')),
     ('', 'show-revs', True, _('print list of result revisions (default)')),
     ('s', 'show-set', None, _('print internal representation of result set')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME')),
     ('', 'no-optimized', False, _('evaluate tree without optimization')),
     ('', 'verify-optimized', False, _('verify optimized result')),
     ],
    ('REVSPEC'))
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems('revsetalias')
    # The parsing pipeline: each stage transforms the tree produced by
    # the previous one.
    stages = [
        ('parsed', lambda tree: tree),
        ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
                                                           ui.warn)),
        ('concatenated', revsetlang.foldconcat),
        ('analyzed', revsetlang.analyze),
        ('optimized', revsetlang.optimize),
    ]
    if opts['no_optimized']:
        stages = stages[:-1]
    if opts['verify_optimized'] and opts['no_optimized']:
        raise error.Abort(_('cannot use --verify-optimized with '
                            '--no-optimized'))
    stagenames = set(n for n, f in stages)

    # Which stage trees to print: 'showalways' prints unconditionally,
    # 'showchanged' only when the stage actually changed the tree.
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
        showchanged.update(['expanded', 'concatenated'])
    if opts['optimize']:
        showalways.add('optimized')
    if opts['show_stage'] and opts['optimize']:
        raise error.Abort(_('cannot use --optimize with --show-stage'))
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    # Run the pipeline, keeping every intermediate tree for later use.
    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(revsetlang.prettyformat(tree), "\n")
            printedtree = tree

    if opts['verify_optimized']:
        # Evaluate both the analyzed and the optimized trees and diff
        # the resulting revision sequences.
        arevs = revset.makematcher(treebystage['analyzed'])(repo)
        brevs = revset.makematcher(treebystage['optimized'])(repo)
        if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
            ui.write(("* analyzed set:\n"), stringutil.prettyrepr(arevs), "\n")
            ui.write(("* optimized set:\n"), stringutil.prettyrepr(brevs), "\n")
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        # Render the mismatch as a unified-style diff of revision lists.
        ui.write(('--- analyzed\n'), label='diff.file_a')
        ui.write(('+++ optimized\n'), label='diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write('-%s\n' % c, label='diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write('+%s\n' % c, label='diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(' %s\n' % c)
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
        ui.write(("* set:\n"), stringutil.prettyrepr(revs), "\n")
    if not opts['show_revs']:
        return
    for c in revs:
        ui.write("%d\n" % c)
2490 2490
@command('debugserve', [
    ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
    ('', 'logiofd', '', _('file descriptor to log server I/O to')),
    ('', 'logiofile', '', _('file to log server I/O to')),
], '')
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts['sshstdio']:
        raise error.Abort(_('only --sshstdio is currently supported'))

    # Optional file object that receives a transcript of server I/O.
    logfh = None

    if opts['logiofd'] and opts['logiofile']:
        raise error.Abort(_('cannot use both --logiofd and --logiofile'))

    if opts['logiofd']:
        # Line buffered because output is line based.
        try:
            logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts['logiofd']), r'wb', 1)
    elif opts['logiofile']:
        logfh = open(opts['logiofile'], 'ab', 1)

    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()
2527 2527
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """

    # The second parent defaults to the null revision when omitted.
    firstparent = scmutil.revsingle(repo, rev1).node()
    secondparent = scmutil.revsingle(repo, rev2, 'null').node()

    with repo.wlock():
        repo.setparents(firstparent, secondparent)
2545 2545
@command('debugssl', [], '[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    '''test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    '''
    # This command only makes sense on Windows: everything below relies on
    # the win32 module and Windows Update behavior.
    if not pycompat.iswindows:
        raise error.Abort(_('certificate chain building is only possible on '
                            'Windows'))

    if not source:
        if not repo:
            raise error.Abort(_("there is no Mercurial repository here, and no "
                                "server specified"))
        source = "default"

    # parseurl() returns (url, branches); branches is unused here.
    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)
    addr = None

    # Only schemes with a well-known port are supported.
    defaultport = {'https': 443, 'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_("malformed port number in URL"))
    else:
        raise error.Abort(_("only https and ssh connections are supported"))

    # Deferred import: win32 is only importable on Windows, and we have
    # already aborted on other platforms above.
    from . import win32

    # Verification is deliberately disabled (CERT_NONE): we only need the
    # peer's raw certificate bytes, fetched below with getpeercert(True),
    # even if the chain is currently untrusted.
    s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
                        cert_reqs=ssl.CERT_NONE, ca_certs=None)

    try:
        s.connect(addr)
        cert = s.getpeercert(True)

        ui.status(_('checking the certificate chain for %s\n') % url.host)

        # First pass: only check, don't trigger Windows Update.
        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_('certificate chain is incomplete, updating... '))

            # Second pass (build=True by default) attempts the repair.
            if not win32.checkcertificatechain(cert):
                ui.status(_('failed.\n'))
            else:
                ui.status(_('done.\n'))
        else:
            ui.status(_('full certificate chain is available\n'))
    finally:
        s.close()
2607 2607
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    # Dump each subrepository's state (source path and pinned revision),
    # sorted by subrepo path for stable output.
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.write(('path %s\n') % path)
        ui.write((' source %s\n') % state[0])
        ui.write((' revision %s\n') % state[1])
2618 2618
@command('debugsuccessorssets',
    [('', 'closest', False, _('return closest successors sets only'))],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # The cache dict is shared across successorssets() calls so repeated
    # computation is amortized over all requested revisions.
    cache = {}
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n' % bytes(ctx))
        for succsset in obsutil.successorssets(repo, ctx.node(),
                                               closest=opts[r'closest'],
                                               cache=cache):
            # One line per successors set: a leading space, then the short
            # hashes separated by single spaces. Empty sets print bare lines.
            if succsset:
                ui.write(' ' + ' '.join(short(n) for n in succsset))
            ui.write('\n')
2671 2671
@command('debugtemplate',
    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    # opts was not run through byteskwargs here, so its keys are native
    # strings - hence the r'' prefixes on the lookups below.
    revs = None
    if opts[r'rev']:
        if repo is None:
            raise error.RepoError(_('there is no Mercurial repository here '
                                    '(.hg not found)'))
        revs = scmutil.revrange(repo, opts[r'rev'])

    # Collect -D KEY=VALUE definitions into a properties dict for rendering.
    props = {}
    for d in opts[r'define']:
        try:
            # A missing '=' makes the unpack raise ValueError too, so both
            # malformed shapes funnel into the single error path below.
            k, v = (e.strip() for e in d.split('=', 1))
            if not k or k == 'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_('malformed keyword definition: %s') % d)

    if ui.verbose:
        # Show the raw parse tree and, if alias expansion changed it, the
        # expanded tree as well.
        aliases = ui.configitems('templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), '\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')

    if revs is None:
        # Generic template: render once with the given properties.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested revision.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
2728 2728
@command('debuguigetpass', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguigetpass(ui, prompt=''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    # Bug fix: the label used to read 'respose:'; use 'response:' to match
    # the (correctly spelled) output of debuguiprompt.
    ui.write(('response: %s\n') % r)
2736 2736
@command('debuguiprompt', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguiprompt(ui, prompt=''):
    """show plain prompt"""
    # Echo back whatever the user typed at the prompt.
    answer = ui.prompt(prompt)
    ui.write(('response: %s\n') % answer)
2744 2744
@command('debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Hold both the working-dir lock and the store lock while rebuilding
    # caches so nothing can write concurrently.
    with repo.wlock():
        with repo.lock():
            repo.updatecaches(full=True)
2750 2750
@command('debugupgraderepo', [
    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
    ('', 'run', False, _('performs an upgrade')),
    ('', 'backup', True, _('keep the old repository content around')),
])
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default the old repository content is kept around after the upgrade
    (see the ``--backup`` flag, which is on by default); pass ``--no-backup``
    to discard it.
    """
    # All the heavy lifting lives in the upgrade module; this command is a
    # thin CLI wrapper that forwards the flags verbatim.
    return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize,
                               backup=backup)
2775 2777
@command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
         inferrepo=True)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    paths = list(repo[None].walk(m))
    if not paths:
        return

    # Optionally normalize path separators for display.
    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
        display = util.normpath
    else:
        display = lambda fn: fn

    # Column widths are sized to the longest absolute and relative paths.
    widthabs = max(len(p) for p in paths)
    widthrel = max(len(m.rel(p)) for p in paths)
    fmt = 'f %%-%ds %%-%ds %%s' % (widthabs, widthrel)

    for p in paths:
        flag = 'exact' if m.exact(p) else ''
        line = fmt % (p, display(m.rel(p)), flag)
        ui.write("%s\n" % line.rstrip())
2796 2798
@command('debugwhyunstable', [], _('REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    ctx = scmutil.revsingle(repo, rev)
    for entry in obsutil.whyunstable(repo, ctx):
        # When divergence is involved, prefix the reason with the divergent
        # changesets and their phases.
        divergent = entry.get('divergentnodes')
        if divergent:
            dnodes = ' '.join('%s (%s)' % (c.hex(), c.phasestr())
                              for c in divergent) + ' '
        else:
            dnodes = ''
        ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
                                    entry['reason'], entry['node']))
2807 2809
@command('debugwireargs',
    [('', 'three', '', 'three'),
     ('', 'four', '', 'four'),
     ('', 'five', '', 'five'),
     ] + cmdutil.remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True)
def debugwireargs(ui, repopath, *vals, **opts):
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)

    # Strip the generic remote options; only the command-specific ones are
    # forwarded over the wire.
    for opt in cmdutil.remoteopts:
        del opts[opt[1]]

    # Keep only options that were actually set.
    kwargs = pycompat.strkwargs(
        {k: v for k, v in opts.iteritems() if v})

    # run twice to check that we don't mess up the stream for the next command
    res1 = repo.debugwireargs(*vals, **kwargs)
    res2 = repo.debugwireargs(*vals, **kwargs)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)
2831 2833
2832 2834 def _parsewirelangblocks(fh):
2833 2835 activeaction = None
2834 2836 blocklines = []
2835 2837 lastindent = 0
2836 2838
2837 2839 for line in fh:
2838 2840 line = line.rstrip()
2839 2841 if not line:
2840 2842 continue
2841 2843
2842 2844 if line.startswith(b'#'):
2843 2845 continue
2844 2846
2845 2847 if not line.startswith(b' '):
2846 2848 # New block. Flush previous one.
2847 2849 if activeaction:
2848 2850 yield activeaction, blocklines
2849 2851
2850 2852 activeaction = line
2851 2853 blocklines = []
2852 2854 lastindent = 0
2853 2855 continue
2854 2856
2855 2857 # Else we start with an indent.
2856 2858
2857 2859 if not activeaction:
2858 2860 raise error.Abort(_('indented line outside of block'))
2859 2861
2860 2862 indent = len(line) - len(line.lstrip())
2861 2863
2862 2864 # If this line is indented more than the last line, concatenate it.
2863 2865 if indent > lastindent and blocklines:
2864 2866 blocklines[-1] += line.lstrip()
2865 2867 else:
2866 2868 blocklines.append(line)
2867 2869 lastindent = indent
2868 2870
2869 2871 # Flush last block.
2870 2872 if activeaction:
2871 2873 yield activeaction, blocklines
2872 2874
@command('debugwireproto',
    [
        ('', 'localssh', False, _('start an SSH server for this repo')),
        ('', 'peer', '', _('construct a specific version of the peer')),
        ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
        ('', 'nologhandshake', False,
         _('do not log I/O related to the peer handshake')),
    ] + cmdutil.remoteopts,
    _('[PATH]'),
    optionalrepo=True)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
    raw data payloads and don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is specified via stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.

    raw+
    ----

    Behaves like ``raw`` except flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

        command listkeys
            namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` N bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` N bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit a *Unified Frame-Based Protocol* by using special
    syntax.

    A frame is composed as a type, flags, and payload. These can be parsed
    from a string of the form:

       <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
    """
    opts = pycompat.byteskwargs(opts)

    if opts['localssh'] and not repo:
        raise error.Abort(_('--localssh requires a repository'))

    if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
        raise error.Abort(_('invalid value for --peer'),
                          hint=_('valid values are "raw", "ssh1", and "ssh2"'))

    if path and opts['localssh']:
        raise error.Abort(_('cannot specify --localssh with an explicit '
                            'path'))

    if ui.interactive():
        ui.write(_('(waiting for commands on stdin)\n'))

    blocks = list(_parsewirelangblocks(ui.fin))

    proc = None
    stdin = None
    stdout = None
    stderr = None
    opener = None

    if opts['localssh']:
        # We start the SSH server in its own process so there is process
        # separation. This prevents a whole class of potential bugs around
        # shared state from interfering with server operation.
        args = procutil.hgcmd() + [
            '-R', repo.root,
            'debugserve', '--sshstdio',
        ]
        proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                bufsize=0)

        stdin = proc.stdin
        stdout = proc.stdout
        stderr = proc.stderr

        # We turn the pipes into observers so we can log I/O.
        if ui.verbose or opts['peer'] == 'raw':
            stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
                                               logdata=True)
            stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
                                                logdata=True)
            stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
                                                logdata=True)

        # --localssh also implies the peer connection settings.

        url = 'ssh://localserver'
        autoreadstderr = not opts['noreadstderr']

        if opts['peer'] == 'ssh1':
            ui.write(_('creating ssh peer for wire protocol version 1\n'))
            peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
                                     None, autoreadstderr=autoreadstderr)
        elif opts['peer'] == 'ssh2':
            ui.write(_('creating ssh peer for wire protocol version 2\n'))
            peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
                                     None, autoreadstderr=autoreadstderr)
        elif opts['peer'] == 'raw':
            ui.write(_('using raw connection to peer\n'))
            peer = None
        else:
            ui.write(_('creating ssh peer from handshake results\n'))
            peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
                                    autoreadstderr=autoreadstderr)

    elif path:
        # We bypass hg.peer() so we can proxy the sockets.
        # TODO consider not doing this because we skip
        # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
        u = util.url(path)
        if u.scheme != 'http':
            raise error.Abort(_('only http:// paths are currently supported'))

        url, authinfo = u.authinfo()
        openerargs = {
            r'useragent': b'Mercurial debugwireproto',
        }

        # Turn pipes/sockets into observers so we can log I/O.
        if ui.verbose:
            openerargs.update({
                r'loggingfh': ui,
                r'loggingname': b's',
                r'loggingopts': {
                    r'logdata': True,
                    r'logdataapis': False,
                },
            })

        if ui.debugflag:
            openerargs[r'loggingopts'][r'logdataapis'] = True

        # Don't send default headers when in raw mode. This allows us to
        # bypass most of the behavior of our URL handling code so we can
        # have near complete control over what's sent on the wire.
        if opts['peer'] == 'raw':
            openerargs[r'sendaccept'] = False

        opener = urlmod.opener(ui, authinfo, **openerargs)

        if opts['peer'] == 'http2':
            ui.write(_('creating http peer for wire protocol version 2\n'))
            # We go through makepeer() because we need an API descriptor for
            # the peer instance to be useful.
            with ui.configoverride({
                ('experimental', 'httppeer.advertise-v2'): True}):
                if opts['nologhandshake']:
                    ui.pushbuffer()

                peer = httppeer.makepeer(ui, path, opener=opener)

                if opts['nologhandshake']:
                    ui.popbuffer()

            if not isinstance(peer, httppeer.httpv2peer):
                raise error.Abort(_('could not instantiate HTTP peer for '
                                    'wire protocol version 2'),
                                  hint=_('the server may not have the feature '
                                         'enabled or is not allowing this '
                                         'client version'))

        elif opts['peer'] == 'raw':
            ui.write(_('using raw connection to peer\n'))
            peer = None
        elif opts['peer']:
            raise error.Abort(_('--peer %s not supported with HTTP peers') %
                              opts['peer'])
        else:
            peer = httppeer.makepeer(ui, path, opener=opener)

        # We /could/ populate stdin/stdout with sock.makefile()...
    else:
        raise error.Abort(_('unsupported connection configuration'))

    batchedcommands = None

    # Now perform actions based on the parsed wire language instructions.
    for action, lines in blocks:
        if action in ('raw', 'raw+'):
            if not stdin:
                raise error.Abort(_('cannot call raw/raw+ on this peer'))

            # Concatenate the data together.
            data = ''.join(l.lstrip() for l in lines)
            data = stringutil.unescapestr(data)
            stdin.write(data)

            if action == 'raw+':
                stdin.flush()
        elif action == 'flush':
            if not stdin:
                raise error.Abort(_('cannot call flush on this peer'))
            stdin.flush()
        elif action.startswith('command'):
            if not peer:
                raise error.Abort(_('cannot send commands unless peer instance '
                                    'is available'))

            command = action.split(' ', 1)[1]

            args = {}
            for line in lines:
                # We need to allow empty values.
                fields = line.lstrip().split(' ', 1)
                if len(fields) == 1:
                    key = fields[0]
                    value = ''
                else:
                    key, value = fields

                if value.startswith('eval:'):
                    value = stringutil.evalpythonliteral(value[5:])
                else:
                    value = stringutil.unescapestr(value)

                args[key] = value

            if batchedcommands is not None:
                batchedcommands.append((command, args))
                continue

            ui.status(_('sending %s command\n') % command)

            if 'PUSHFILE' in args:
                with open(args['PUSHFILE'], r'rb') as fh:
                    del args['PUSHFILE']
                    res, output = peer._callpush(command, fh,
                                                 **pycompat.strkwargs(args))
                    ui.status(_('result: %s\n') % stringutil.escapestr(res))
                    ui.status(_('remote output: %s\n') %
                              stringutil.escapestr(output))
            else:
                with peer.commandexecutor() as e:
                    res = e.callcommand(command, args).result()

                if isinstance(res, wireprotov2peer.commandresponse):
                    val = res.objects()
                    ui.status(_('response: %s\n') %
                              stringutil.pprint(val, bprefix=True, indent=2))
                else:
                    ui.status(_('response: %s\n') %
                              stringutil.pprint(res, bprefix=True, indent=2))

        elif action == 'batchbegin':
            if batchedcommands is not None:
                raise error.Abort(_('nested batchbegin not allowed'))

            batchedcommands = []
        elif action == 'batchsubmit':
            # There is a batching API we could go through. But it would be
            # difficult to normalize requests into function calls. It is easier
            # to bypass this layer and normalize to commands + args.
            ui.status(_('sending batch with %d sub-commands\n') %
                      len(batchedcommands))
            for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                ui.status(_('response #%d: %s\n') %
                          (i, stringutil.escapestr(chunk)))

            batchedcommands = None

        elif action.startswith('httprequest '):
            if not opener:
                raise error.Abort(_('cannot use httprequest without an HTTP '
                                    'peer'))

            request = action.split(' ', 2)
            if len(request) != 3:
                raise error.Abort(_('invalid httprequest: expected format is '
                                    '"httprequest <method> <path>'))

            method, httppath = request[1:]
            headers = {}
            body = None
            frames = []
            for line in lines:
                line = line.lstrip()
                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                if m:
                    # Headers need to use native strings.
                    key = pycompat.strurl(m.group(1))
                    value = pycompat.strurl(m.group(2))
                    headers[key] = value
                    continue

                if line.startswith(b'BODYFILE '):
                    # Bug fix: split() returns a list; we want the path after
                    # the keyword. The missing [1] previously passed the whole
                    # list to open(), which always raised TypeError.
                    with open(line.split(b' ', 1)[1], 'rb') as fh:
                        body = fh.read()
                elif line.startswith(b'frame '):
                    frame = wireprotoframing.makeframefromhumanstring(
                        line[len(b'frame '):])

                    frames.append(frame)
                else:
                    raise error.Abort(_('unknown argument to httprequest: %s') %
                                      line)

            url = path + httppath

            if frames:
                body = b''.join(bytes(f) for f in frames)

            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)

            # urllib.Request insists on using has_data() as a proxy for
            # determining the request method. Override that to use our
            # explicitly requested method.
            req.get_method = lambda: pycompat.sysstr(method)

            try:
                res = opener.open(req)
                body = res.read()
            except util.urlerr.urlerror as e:
                # read() method must be called, but only exists in Python 2
                getattr(e, 'read', lambda: None)()
                continue

            ct = res.headers.get(r'Content-Type')
            if ct == r'application/mercurial-cbor':
                ui.write(_('cbor> %s\n') %
                         stringutil.pprint(cborutil.decodeall(body),
                                           bprefix=True,
                                           indent=2))

        elif action == 'close':
            peer.close()
        elif action == 'readavailable':
            if not stdout or not stderr:
                raise error.Abort(_('readavailable not available on this peer'))

            stdin.close()
            stdout.read()
            stderr.read()

        elif action == 'readline':
            if not stdout:
                raise error.Abort(_('readline not available on this peer'))
            stdout.readline()
        elif action == 'ereadline':
            if not stderr:
                raise error.Abort(_('ereadline not available on this peer'))
            stderr.readline()
        elif action.startswith('read '):
            count = int(action.split(' ', 1)[1])
            if not stdout:
                raise error.Abort(_('read not available on this peer'))
            stdout.read(count)
        elif action.startswith('eread '):
            count = int(action.split(' ', 1)[1])
            if not stderr:
                raise error.Abort(_('eread not available on this peer'))
            stderr.read(count)
        else:
            raise error.Abort(_('unknown action: %s') % action)

    if batchedcommands is not None:
        raise error.Abort(_('unclosed "batchbegin" request'))

    if peer:
        peer.close()

    if proc:
        proc.kill()
@@ -1,912 +1,916 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 changelog,
15 15 error,
16 16 filelog,
17 17 hg,
18 18 localrepo,
19 19 manifest,
20 20 pycompat,
21 21 revlog,
22 22 scmutil,
23 23 util,
24 24 vfs as vfsmod,
25 25 )
26 26
27 27 def requiredsourcerequirements(repo):
28 28 """Obtain requirements required to be present to upgrade a repo.
29 29
30 30 An upgrade will not be allowed if the repository doesn't have the
31 31 requirements returned by this function.
32 32 """
33 33 return {
34 34 # Introduced in Mercurial 0.9.2.
35 35 'revlogv1',
36 36 # Introduced in Mercurial 0.9.2.
37 37 'store',
38 38 }
39 39
40 40 def blocksourcerequirements(repo):
41 41 """Obtain requirements that will prevent an upgrade from occurring.
42 42
43 43 An upgrade cannot be performed if the source repository contains a
44 44 requirements in the returned set.
45 45 """
46 46 return {
47 47 # The upgrade code does not yet support these experimental features.
48 48 # This is an artificial limitation.
49 49 'treemanifest',
50 50 # This was a precursor to generaldelta and was never enabled by default.
51 51 # It should (hopefully) not exist in the wild.
52 52 'parentdelta',
53 53 # Upgrade should operate on the actual store, not the shared link.
54 54 'shared',
55 55 }
56 56
57 57 def supportremovedrequirements(repo):
58 58 """Obtain requirements that can be removed during an upgrade.
59 59
60 60 If an upgrade were to create a repository that dropped a requirement,
61 61 the dropped requirement must appear in the returned set for the upgrade
62 62 to be allowed.
63 63 """
64 64 return {
65 65 localrepo.SPARSEREVLOG_REQUIREMENT,
66 66 }
67 67
68 68 def supporteddestrequirements(repo):
69 69 """Obtain requirements that upgrade supports in the destination.
70 70
71 71 If the result of the upgrade would create requirements not in this set,
72 72 the upgrade is disallowed.
73 73
74 74 Extensions should monkeypatch this to add their custom requirements.
75 75 """
76 76 return {
77 77 'dotencode',
78 78 'fncache',
79 79 'generaldelta',
80 80 'revlogv1',
81 81 'store',
82 82 localrepo.SPARSEREVLOG_REQUIREMENT,
83 83 }
84 84
85 85 def allowednewrequirements(repo):
86 86 """Obtain requirements that can be added to a repository during upgrade.
87 87
88 88 This is used to disallow proposed requirements from being added when
89 89 they weren't present before.
90 90
91 91 We use a list of allowed requirement additions instead of a list of known
92 92 bad additions because the whitelist approach is safer and will prevent
93 93 future, unknown requirements from accidentally being added.
94 94 """
95 95 return {
96 96 'dotencode',
97 97 'fncache',
98 98 'generaldelta',
99 99 localrepo.SPARSEREVLOG_REQUIREMENT,
100 100 }
101 101
102 102 def preservedrequirements(repo):
103 103 return set()
104 104
105 105 deficiency = 'deficiency'
106 106 optimisation = 'optimization'
107 107
108 108 class improvement(object):
109 109 """Represents an improvement that can be made as part of an upgrade.
110 110
111 111 The following attributes are defined on each instance:
112 112
113 113 name
114 114 Machine-readable string uniquely identifying this improvement. It
115 115 will be mapped to an action later in the upgrade process.
116 116
117 117 type
118 118 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
119 119 problem. An optimization is an action (sometimes optional) that
120 120 can be taken to further improve the state of the repository.
121 121
122 122 description
123 123 Message intended for humans explaining the improvement in more detail,
124 124 including the implications of it. For ``deficiency`` types, should be
125 125 worded in the present tense. For ``optimisation`` types, should be
126 126 worded in the future tense.
127 127
128 128 upgrademessage
129 129 Message intended for humans explaining what an upgrade addressing this
130 130 issue will do. Should be worded in the future tense.
131 131 """
132 132 def __init__(self, name, type, description, upgrademessage):
133 133 self.name = name
134 134 self.type = type
135 135 self.description = description
136 136 self.upgrademessage = upgrademessage
137 137
138 138 def __eq__(self, other):
139 139 if not isinstance(other, improvement):
140 140 # This is what python tell use to do
141 141 return NotImplemented
142 142 return self.name == other.name
143 143
144 144 def __ne__(self, other):
145 145 return not (self == other)
146 146
147 147 def __hash__(self):
148 148 return hash(self.name)
149 149
150 150 allformatvariant = []
151 151
152 152 def registerformatvariant(cls):
153 153 allformatvariant.append(cls)
154 154 return cls
155 155
156 156 class formatvariant(improvement):
157 157 """an improvement subclass dedicated to repository format"""
158 158 type = deficiency
159 159 ### The following attributes should be defined for each class:
160 160
161 161 # machine-readable string uniquely identifying this improvement. it will be
162 162 # mapped to an action later in the upgrade process.
163 163 name = None
164 164
165 165 # message intended for humans explaining the improvement in more detail,
166 166 # including the implications of it ``deficiency`` types, should be worded
167 167 # in the present tense.
168 168 description = None
169 169
170 170 # message intended for humans explaining what an upgrade addressing this
171 171 # issue will do. should be worded in the future tense.
172 172 upgrademessage = None
173 173
174 174 # value of current Mercurial default for new repository
175 175 default = None
176 176
177 177 def __init__(self):
178 178 raise NotImplementedError()
179 179
180 180 @staticmethod
181 181 def fromrepo(repo):
182 182 """current value of the variant in the repository"""
183 183 raise NotImplementedError()
184 184
185 185 @staticmethod
186 186 def fromconfig(repo):
187 187 """current value of the variant in the configuration"""
188 188 raise NotImplementedError()
189 189
190 190 class requirementformatvariant(formatvariant):
191 191 """formatvariant based on a 'requirement' name.
192 192
193 193 Many format variant are controlled by a 'requirement'. We define a small
194 194 subclass to factor the code.
195 195 """
196 196
197 197 # the requirement that control this format variant
198 198 _requirement = None
199 199
200 200 @staticmethod
201 201 def _newreporequirements(ui):
202 202 return localrepo.newreporequirements(
203 203 ui, localrepo.defaultcreateopts(ui))
204 204
205 205 @classmethod
206 206 def fromrepo(cls, repo):
207 207 assert cls._requirement is not None
208 208 return cls._requirement in repo.requirements
209 209
210 210 @classmethod
211 211 def fromconfig(cls, repo):
212 212 assert cls._requirement is not None
213 213 return cls._requirement in cls._newreporequirements(repo.ui)
214 214
215 215 @registerformatvariant
216 216 class fncache(requirementformatvariant):
217 217 name = 'fncache'
218 218
219 219 _requirement = 'fncache'
220 220
221 221 default = True
222 222
223 223 description = _('long and reserved filenames may not work correctly; '
224 224 'repository performance is sub-optimal')
225 225
226 226 upgrademessage = _('repository will be more resilient to storing '
227 227 'certain paths and performance of certain '
228 228 'operations should be improved')
229 229
230 230 @registerformatvariant
231 231 class dotencode(requirementformatvariant):
232 232 name = 'dotencode'
233 233
234 234 _requirement = 'dotencode'
235 235
236 236 default = True
237 237
238 238 description = _('storage of filenames beginning with a period or '
239 239 'space may not work correctly')
240 240
241 241 upgrademessage = _('repository will be better able to store files '
242 242 'beginning with a space or period')
243 243
244 244 @registerformatvariant
245 245 class generaldelta(requirementformatvariant):
246 246 name = 'generaldelta'
247 247
248 248 _requirement = 'generaldelta'
249 249
250 250 default = True
251 251
252 252 description = _('deltas within internal storage are unable to '
253 253 'choose optimal revisions; repository is larger and '
254 254 'slower than it could be; interaction with other '
255 255 'repositories may require extra network and CPU '
256 256 'resources, making "hg push" and "hg pull" slower')
257 257
258 258 upgrademessage = _('repository storage will be able to create '
259 259 'optimal deltas; new repository data will be '
260 260 'smaller and read times should decrease; '
261 261 'interacting with other repositories using this '
262 262 'storage model should require less network and '
263 263 'CPU resources, making "hg push" and "hg pull" '
264 264 'faster')
265 265
266 266 @registerformatvariant
267 267 class sparserevlog(requirementformatvariant):
268 268 name = 'sparserevlog'
269 269
270 270 _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
271 271
272 272 default = True
273 273
274 274 description = _('in order to limit disk reading and memory usage on older '
275 275 'version, the span of a delta chain from its root to its '
276 276 'end is limited, whatever the relevant data in this span. '
277 277 'This can severly limit Mercurial ability to build good '
278 278 'chain of delta resulting is much more storage space being '
279 279 'taken and limit reusability of on disk delta during '
280 280 'exchange.'
281 281 )
282 282
283 283 upgrademessage = _('Revlog supports delta chain with more unused data '
284 284 'between payload. These gaps will be skipped at read '
285 285 'time. This allows for better delta chains, making a '
286 286 'better compression and faster exchange with server.')
287 287
288 288 @registerformatvariant
289 289 class removecldeltachain(formatvariant):
290 290 name = 'plain-cl-delta'
291 291
292 292 default = True
293 293
294 294 description = _('changelog storage is using deltas instead of '
295 295 'raw entries; changelog reading and any '
296 296 'operation relying on changelog data are slower '
297 297 'than they could be')
298 298
299 299 upgrademessage = _('changelog storage will be reformated to '
300 300 'store raw entries; changelog reading will be '
301 301 'faster; changelog size may be reduced')
302 302
303 303 @staticmethod
304 304 def fromrepo(repo):
305 305 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
306 306 # changelogs with deltas.
307 307 cl = repo.changelog
308 308 chainbase = cl.chainbase
309 309 return all(rev == chainbase(rev) for rev in cl)
310 310
311 311 @staticmethod
312 312 def fromconfig(repo):
313 313 return True
314 314
315 315 @registerformatvariant
316 316 class compressionengine(formatvariant):
317 317 name = 'compression'
318 318 default = 'zlib'
319 319
320 320 description = _('Compresion algorithm used to compress data. '
321 321 'Some engine are faster than other')
322 322
323 323 upgrademessage = _('revlog content will be recompressed with the new '
324 324 'algorithm.')
325 325
326 326 @classmethod
327 327 def fromrepo(cls, repo):
328 328 for req in repo.requirements:
329 329 if req.startswith('exp-compression-'):
330 330 return req.split('-', 2)[2]
331 331 return 'zlib'
332 332
333 333 @classmethod
334 334 def fromconfig(cls, repo):
335 335 return repo.ui.config('experimental', 'format.compression')
336 336
337 337 def finddeficiencies(repo):
338 338 """returns a list of deficiencies that the repo suffer from"""
339 339 deficiencies = []
340 340
341 341 # We could detect lack of revlogv1 and store here, but they were added
342 342 # in 0.9.2 and we don't support upgrading repos without these
343 343 # requirements, so let's not bother.
344 344
345 345 for fv in allformatvariant:
346 346 if not fv.fromrepo(repo):
347 347 deficiencies.append(fv)
348 348
349 349 return deficiencies
350 350
351 351 # search without '-' to support older form on newer client.
352 352 #
353 353 # We don't enforce backward compatibility for debug command so this
354 354 # might eventually be dropped. However, having to use two different
355 355 # forms in script when comparing result is anoying enough to add
356 356 # backward compatibility for a while.
357 357 legacy_opts_map = {
358 358 'redeltaparent': 're-delta-parent',
359 359 'redeltamultibase': 're-delta-multibase',
360 360 'redeltaall': 're-delta-all',
361 361 'redeltafulladd': 're-delta-fulladd',
362 362 }
363 363
364 364 def findoptimizations(repo):
365 365 """Determine optimisation that could be used during upgrade"""
366 366 # These are unconditionally added. There is logic later that figures out
367 367 # which ones to apply.
368 368 optimizations = []
369 369
370 370 optimizations.append(improvement(
371 371 name='re-delta-parent',
372 372 type=optimisation,
373 373 description=_('deltas within internal storage will be recalculated to '
374 374 'choose an optimal base revision where this was not '
375 375 'already done; the size of the repository may shrink and '
376 376 'various operations may become faster; the first time '
377 377 'this optimization is performed could slow down upgrade '
378 378 'execution considerably; subsequent invocations should '
379 379 'not run noticeably slower'),
380 380 upgrademessage=_('deltas within internal storage will choose a new '
381 381 'base revision if needed')))
382 382
383 383 optimizations.append(improvement(
384 384 name='re-delta-multibase',
385 385 type=optimisation,
386 386 description=_('deltas within internal storage will be recalculated '
387 387 'against multiple base revision and the smallest '
388 388 'difference will be used; the size of the repository may '
389 389 'shrink significantly when there are many merges; this '
390 390 'optimization will slow down execution in proportion to '
391 391 'the number of merges in the repository and the amount '
392 392 'of files in the repository; this slow down should not '
393 393 'be significant unless there are tens of thousands of '
394 394 'files and thousands of merges'),
395 395 upgrademessage=_('deltas within internal storage will choose an '
396 396 'optimal delta by computing deltas against multiple '
397 397 'parents; may slow down execution time '
398 398 'significantly')))
399 399
400 400 optimizations.append(improvement(
401 401 name='re-delta-all',
402 402 type=optimisation,
403 403 description=_('deltas within internal storage will always be '
404 404 'recalculated without reusing prior deltas; this will '
405 405 'likely make execution run several times slower; this '
406 406 'optimization is typically not needed'),
407 407 upgrademessage=_('deltas within internal storage will be fully '
408 408 'recomputed; this will likely drastically slow down '
409 409 'execution time')))
410 410
411 411 optimizations.append(improvement(
412 412 name='re-delta-fulladd',
413 413 type=optimisation,
414 414 description=_('every revision will be re-added as if it was new '
415 415 'content. It will go through the full storage '
416 416 'mechanism giving extensions a chance to process it '
417 417 '(eg. lfs). This is similar to "re-delta-all" but even '
418 418 'slower since more logic is involved.'),
419 419 upgrademessage=_('each revision will be added as new content to the '
420 420 'internal storage; this will likely drastically slow '
421 421 'down execution time, but some extensions might need '
422 422 'it')))
423 423
424 424 return optimizations
425 425
426 426 def determineactions(repo, deficiencies, sourcereqs, destreqs):
427 427 """Determine upgrade actions that will be performed.
428 428
429 429 Given a list of improvements as returned by ``finddeficiencies`` and
430 430 ``findoptimizations``, determine the list of upgrade actions that
431 431 will be performed.
432 432
433 433 The role of this function is to filter improvements if needed, apply
434 434 recommended optimizations from the improvements list that make sense,
435 435 etc.
436 436
437 437 Returns a list of action names.
438 438 """
439 439 newactions = []
440 440
441 441 knownreqs = supporteddestrequirements(repo)
442 442
443 443 for d in deficiencies:
444 444 name = d.name
445 445
446 446 # If the action is a requirement that doesn't show up in the
447 447 # destination requirements, prune the action.
448 448 if name in knownreqs and name not in destreqs:
449 449 continue
450 450
451 451 newactions.append(d)
452 452
453 453 # FUTURE consider adding some optimizations here for certain transitions.
454 454 # e.g. adding generaldelta could schedule parent redeltas.
455 455
456 456 return newactions
457 457
458 458 def _revlogfrompath(repo, path):
459 459 """Obtain a revlog from a repo path.
460 460
461 461 An instance of the appropriate class is returned.
462 462 """
463 463 if path == '00changelog.i':
464 464 return changelog.changelog(repo.svfs)
465 465 elif path.endswith('00manifest.i'):
466 466 mandir = path[:-len('00manifest.i')]
467 467 return manifest.manifestrevlog(repo.svfs, tree=mandir)
468 468 else:
469 469 #reverse of "/".join(("data", path + ".i"))
470 470 return filelog.filelog(repo.svfs, path[5:-2])
471 471
472 472 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents):
473 473 """Copy revlogs between 2 repos."""
474 474 revcount = 0
475 475 srcsize = 0
476 476 srcrawsize = 0
477 477 dstsize = 0
478 478 fcount = 0
479 479 frevcount = 0
480 480 fsrcsize = 0
481 481 frawsize = 0
482 482 fdstsize = 0
483 483 mcount = 0
484 484 mrevcount = 0
485 485 msrcsize = 0
486 486 mrawsize = 0
487 487 mdstsize = 0
488 488 crevcount = 0
489 489 csrcsize = 0
490 490 crawsize = 0
491 491 cdstsize = 0
492 492
493 493 # Perform a pass to collect metadata. This validates we can open all
494 494 # source files and allows a unified progress bar to be displayed.
495 495 for unencoded, encoded, size in srcrepo.store.walk():
496 496 if unencoded.endswith('.d'):
497 497 continue
498 498
499 499 rl = _revlogfrompath(srcrepo, unencoded)
500 500
501 501 info = rl.storageinfo(exclusivefiles=True, revisionscount=True,
502 502 trackedsize=True, storedsize=True)
503 503
504 504 revcount += info['revisionscount'] or 0
505 505 datasize = info['storedsize'] or 0
506 506 rawsize = info['trackedsize'] or 0
507 507
508 508 srcsize += datasize
509 509 srcrawsize += rawsize
510 510
511 511 # This is for the separate progress bars.
512 512 if isinstance(rl, changelog.changelog):
513 513 crevcount += len(rl)
514 514 csrcsize += datasize
515 515 crawsize += rawsize
516 516 elif isinstance(rl, manifest.manifestrevlog):
517 517 mcount += 1
518 518 mrevcount += len(rl)
519 519 msrcsize += datasize
520 520 mrawsize += rawsize
521 521 elif isinstance(rl, filelog.filelog):
522 522 fcount += 1
523 523 frevcount += len(rl)
524 524 fsrcsize += datasize
525 525 frawsize += rawsize
526 526 else:
527 527 error.ProgrammingError('unknown revlog type')
528 528
529 529 if not revcount:
530 530 return
531 531
532 532 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
533 533 '%d in changelog)\n') %
534 534 (revcount, frevcount, mrevcount, crevcount))
535 535 ui.write(_('migrating %s in store; %s tracked data\n') % (
536 536 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
537 537
538 538 # Used to keep track of progress.
539 539 progress = None
540 540 def oncopiedrevision(rl, rev, node):
541 541 progress.increment()
542 542
543 543 # Do the actual copying.
544 544 # FUTURE this operation can be farmed off to worker processes.
545 545 seen = set()
546 546 for unencoded, encoded, size in srcrepo.store.walk():
547 547 if unencoded.endswith('.d'):
548 548 continue
549 549
550 550 oldrl = _revlogfrompath(srcrepo, unencoded)
551 551 newrl = _revlogfrompath(dstrepo, unencoded)
552 552
553 553 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
554 554 ui.write(_('finished migrating %d manifest revisions across %d '
555 555 'manifests; change in size: %s\n') %
556 556 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
557 557
558 558 ui.write(_('migrating changelog containing %d revisions '
559 559 '(%s in store; %s tracked data)\n') %
560 560 (crevcount, util.bytecount(csrcsize),
561 561 util.bytecount(crawsize)))
562 562 seen.add('c')
563 563 progress = srcrepo.ui.makeprogress(_('changelog revisions'),
564 564 total=crevcount)
565 565 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
566 566 ui.write(_('finished migrating %d filelog revisions across %d '
567 567 'filelogs; change in size: %s\n') %
568 568 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
569 569
570 570 ui.write(_('migrating %d manifests containing %d revisions '
571 571 '(%s in store; %s tracked data)\n') %
572 572 (mcount, mrevcount, util.bytecount(msrcsize),
573 573 util.bytecount(mrawsize)))
574 574 seen.add('m')
575 575 if progress:
576 576 progress.complete()
577 577 progress = srcrepo.ui.makeprogress(_('manifest revisions'),
578 578 total=mrevcount)
579 579 elif 'f' not in seen:
580 580 ui.write(_('migrating %d filelogs containing %d revisions '
581 581 '(%s in store; %s tracked data)\n') %
582 582 (fcount, frevcount, util.bytecount(fsrcsize),
583 583 util.bytecount(frawsize)))
584 584 seen.add('f')
585 585 if progress:
586 586 progress.complete()
587 587 progress = srcrepo.ui.makeprogress(_('file revisions'),
588 588 total=frevcount)
589 589
590 590
591 591 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
592 592 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
593 593 deltareuse=deltareuse,
594 594 forcedeltabothparents=forcedeltabothparents)
595 595
596 596 info = newrl.storageinfo(storedsize=True)
597 597 datasize = info['storedsize'] or 0
598 598
599 599 dstsize += datasize
600 600
601 601 if isinstance(newrl, changelog.changelog):
602 602 cdstsize += datasize
603 603 elif isinstance(newrl, manifest.manifestrevlog):
604 604 mdstsize += datasize
605 605 else:
606 606 fdstsize += datasize
607 607
608 608 progress.complete()
609 609
610 610 ui.write(_('finished migrating %d changelog revisions; change in size: '
611 611 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
612 612
613 613 ui.write(_('finished migrating %d total revisions; total change in store '
614 614 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
615 615
616 616 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
617 617 """Determine whether to copy a store file during upgrade.
618 618
619 619 This function is called when migrating store files from ``srcrepo`` to
620 620 ``dstrepo`` as part of upgrading a repository.
621 621
622 622 Args:
623 623 srcrepo: repo we are copying from
624 624 dstrepo: repo we are copying to
625 625 requirements: set of requirements for ``dstrepo``
626 626 path: store file being examined
627 627 mode: the ``ST_MODE`` file type of ``path``
628 628 st: ``stat`` data structure for ``path``
629 629
630 630 Function should return ``True`` if the file is to be copied.
631 631 """
632 632 # Skip revlogs.
633 633 if path.endswith(('.i', '.d')):
634 634 return False
635 635 # Skip transaction related files.
636 636 if path.startswith('undo'):
637 637 return False
638 638 # Only copy regular files.
639 639 if mode != stat.S_IFREG:
640 640 return False
641 641 # Skip other skipped files.
642 642 if path in ('lock', 'fncache'):
643 643 return False
644 644
645 645 return True
646 646
647 647 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
648 648 """Hook point for extensions to perform additional actions during upgrade.
649 649
650 650 This function is called after revlogs and store files have been copied but
651 651 before the new store is swapped into the original location.
652 652 """
653 653
654 654 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
655 655 """Do the low-level work of upgrading a repository.
656 656
657 657 The upgrade is effectively performed as a copy between a source
658 658 repository and a temporary destination repository.
659 659
660 660 The source repository is unmodified for as long as possible so the
661 661 upgrade can abort at any time without causing loss of service for
662 662 readers and without corrupting the source repository.
663 663 """
664 664 assert srcrepo.currentwlock()
665 665 assert dstrepo.currentwlock()
666 666
667 667 ui.write(_('(it is safe to interrupt this process any time before '
668 668 'data migration completes)\n'))
669 669
670 670 if 're-delta-all' in actions:
671 671 deltareuse = revlog.revlog.DELTAREUSENEVER
672 672 elif 're-delta-parent' in actions:
673 673 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
674 674 elif 're-delta-multibase' in actions:
675 675 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
676 676 elif 're-delta-fulladd' in actions:
677 677 deltareuse = revlog.revlog.DELTAREUSEFULLADD
678 678 else:
679 679 deltareuse = revlog.revlog.DELTAREUSEALWAYS
680 680
681 681 with dstrepo.transaction('upgrade') as tr:
682 682 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
683 683 're-delta-multibase' in actions)
684 684
685 685 # Now copy other files in the store directory.
686 686 # The sorted() makes execution deterministic.
687 687 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
688 688 if not _filterstorefile(srcrepo, dstrepo, requirements,
689 689 p, kind, st):
690 690 continue
691 691
692 692 srcrepo.ui.write(_('copying %s\n') % p)
693 693 src = srcrepo.store.rawvfs.join(p)
694 694 dst = dstrepo.store.rawvfs.join(p)
695 695 util.copyfile(src, dst, copystat=True)
696 696
697 697 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
698 698
699 699 ui.write(_('data fully migrated to temporary repository\n'))
700 700
701 701 backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
702 702 backupvfs = vfsmod.vfs(backuppath)
703 703
704 704 # Make a backup of requires file first, as it is the first to be modified.
705 705 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
706 706
707 707 # We install an arbitrary requirement that clients must not support
708 708 # as a mechanism to lock out new clients during the data swap. This is
709 709 # better than allowing a client to continue while the repository is in
710 710 # an inconsistent state.
711 711 ui.write(_('marking source repository as being upgraded; clients will be '
712 712 'unable to read from repository\n'))
713 713 scmutil.writerequires(srcrepo.vfs,
714 714 srcrepo.requirements | {'upgradeinprogress'})
715 715
716 716 ui.write(_('starting in-place swap of repository data\n'))
717 717 ui.write(_('replaced files will be backed up at %s\n') %
718 718 backuppath)
719 719
720 720 # Now swap in the new store directory. Doing it as a rename should make
721 721 # the operation nearly instantaneous and atomic (at least in well-behaved
722 722 # environments).
723 723 ui.write(_('replacing store...\n'))
724 724 tstart = util.timer()
725 725 util.rename(srcrepo.spath, backupvfs.join('store'))
726 726 util.rename(dstrepo.spath, srcrepo.spath)
727 727 elapsed = util.timer() - tstart
728 728 ui.write(_('store replacement complete; repository was inconsistent for '
729 729 '%0.1fs\n') % elapsed)
730 730
731 731 # We first write the requirements file. Any new requirements will lock
732 732 # out legacy clients.
733 733 ui.write(_('finalizing requirements file and making repository readable '
734 734 'again\n'))
735 735 scmutil.writerequires(srcrepo.vfs, requirements)
736 736
737 737 # The lock file from the old store won't be removed because nothing has a
738 738 # reference to its new location. So clean it up manually. Alternatively, we
739 739 # could update srcrepo.svfs and other variables to point to the new
740 740 # location. This is simpler.
741 741 backupvfs.unlink('store/lock')
742 742
743 743 return backuppath
744 744
745 def upgraderepo(ui, repo, run=False, optimize=None):
745 def upgraderepo(ui, repo, run=False, optimize=None, backup=True):
746 746 """Upgrade a repository in place."""
747 747 if optimize is None:
748 748 optimize = []
749 749 optimize = set(legacy_opts_map.get(o, o) for o in optimize)
750 750 repo = repo.unfiltered()
751 751
752 752 # Ensure the repository can be upgraded.
753 753 missingreqs = requiredsourcerequirements(repo) - repo.requirements
754 754 if missingreqs:
755 755 raise error.Abort(_('cannot upgrade repository; requirement '
756 756 'missing: %s') % _(', ').join(sorted(missingreqs)))
757 757
758 758 blockedreqs = blocksourcerequirements(repo) & repo.requirements
759 759 if blockedreqs:
760 760 raise error.Abort(_('cannot upgrade repository; unsupported source '
761 761 'requirement: %s') %
762 762 _(', ').join(sorted(blockedreqs)))
763 763
764 764 # FUTURE there is potentially a need to control the wanted requirements via
765 765 # command arguments or via an extension hook point.
766 766 newreqs = localrepo.newreporequirements(
767 767 repo.ui, localrepo.defaultcreateopts(repo.ui))
768 768 newreqs.update(preservedrequirements(repo))
769 769
770 770 noremovereqs = (repo.requirements - newreqs -
771 771 supportremovedrequirements(repo))
772 772 if noremovereqs:
773 773 raise error.Abort(_('cannot upgrade repository; requirement would be '
774 774 'removed: %s') % _(', ').join(sorted(noremovereqs)))
775 775
776 776 noaddreqs = (newreqs - repo.requirements -
777 777 allowednewrequirements(repo))
778 778 if noaddreqs:
779 779 raise error.Abort(_('cannot upgrade repository; do not support adding '
780 780 'requirement: %s') %
781 781 _(', ').join(sorted(noaddreqs)))
782 782
783 783 unsupportedreqs = newreqs - supporteddestrequirements(repo)
784 784 if unsupportedreqs:
785 785 raise error.Abort(_('cannot upgrade repository; do not support '
786 786 'destination requirement: %s') %
787 787 _(', ').join(sorted(unsupportedreqs)))
788 788
789 789 # Find and validate all improvements that can be made.
790 790 alloptimizations = findoptimizations(repo)
791 791
792 792 # Apply and Validate arguments.
793 793 optimizations = []
794 794 for o in alloptimizations:
795 795 if o.name in optimize:
796 796 optimizations.append(o)
797 797 optimize.discard(o.name)
798 798
799 799 if optimize: # anything left is unknown
800 800 raise error.Abort(_('unknown optimization action requested: %s') %
801 801 ', '.join(sorted(optimize)),
802 802 hint=_('run without arguments to see valid '
803 803 'optimizations'))
804 804
805 805 deficiencies = finddeficiencies(repo)
806 806 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
807 807 actions.extend(o for o in sorted(optimizations)
808 808 # determineactions could have added optimisation
809 809 if o not in actions)
810 810
811 811 def printrequirements():
812 812 ui.write(_('requirements\n'))
813 813 ui.write(_(' preserved: %s\n') %
814 814 _(', ').join(sorted(newreqs & repo.requirements)))
815 815
816 816 if repo.requirements - newreqs:
817 817 ui.write(_(' removed: %s\n') %
818 818 _(', ').join(sorted(repo.requirements - newreqs)))
819 819
820 820 if newreqs - repo.requirements:
821 821 ui.write(_(' added: %s\n') %
822 822 _(', ').join(sorted(newreqs - repo.requirements)))
823 823
824 824 ui.write('\n')
825 825
826 826 def printupgradeactions():
827 827 for a in actions:
828 828 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
829 829
830 830 if not run:
831 831 fromconfig = []
832 832 onlydefault = []
833 833
834 834 for d in deficiencies:
835 835 if d.fromconfig(repo):
836 836 fromconfig.append(d)
837 837 elif d.default:
838 838 onlydefault.append(d)
839 839
840 840 if fromconfig or onlydefault:
841 841
842 842 if fromconfig:
843 843 ui.write(_('repository lacks features recommended by '
844 844 'current config options:\n\n'))
845 845 for i in fromconfig:
846 846 ui.write('%s\n %s\n\n' % (i.name, i.description))
847 847
848 848 if onlydefault:
849 849 ui.write(_('repository lacks features used by the default '
850 850 'config options:\n\n'))
851 851 for i in onlydefault:
852 852 ui.write('%s\n %s\n\n' % (i.name, i.description))
853 853
854 854 ui.write('\n')
855 855 else:
856 856 ui.write(_('(no feature deficiencies found in existing '
857 857 'repository)\n'))
858 858
859 859 ui.write(_('performing an upgrade with "--run" will make the following '
860 860 'changes:\n\n'))
861 861
862 862 printrequirements()
863 863 printupgradeactions()
864 864
865 865 unusedoptimize = [i for i in alloptimizations if i not in actions]
866 866
867 867 if unusedoptimize:
868 868 ui.write(_('additional optimizations are available by specifying '
869 869 '"--optimize <name>":\n\n'))
870 870 for i in unusedoptimize:
871 871 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
872 872 return
873 873
874 874 # Else we're in the run=true case.
875 875 ui.write(_('upgrade will perform the following actions:\n\n'))
876 876 printrequirements()
877 877 printupgradeactions()
878 878
879 879 upgradeactions = [a.name for a in actions]
880 880
881 881 ui.write(_('beginning upgrade...\n'))
882 882 with repo.wlock(), repo.lock():
883 883 ui.write(_('repository locked and read-only\n'))
884 884 # Our strategy for upgrading the repository is to create a new,
885 885 # temporary repository, write data to it, then do a swap of the
886 886 # data. There are less heavyweight ways to do this, but it is easier
887 887 # to create a new repo object than to instantiate all the components
888 888 # (like the store) separately.
889 889 tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
890 890 backuppath = None
891 891 try:
892 892 ui.write(_('creating temporary repository to stage migrated '
893 893 'data: %s\n') % tmppath)
894 894
895 895 # clone ui without using ui.copy because repo.ui is protected
896 896 repoui = repo.ui.__class__(repo.ui)
897 897 dstrepo = hg.repository(repoui, path=tmppath, create=True)
898 898
899 899 with dstrepo.wlock(), dstrepo.lock():
900 900 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
901 901 upgradeactions)
902 if not (backup or backuppath is None):
903 ui.write(_('removing old repository content%s\n') % backuppath)
904 repo.vfs.rmtree(backuppath, forcibly=True)
905 backuppath = None
902 906
903 907 finally:
904 908 ui.write(_('removing temporary repository %s\n') % tmppath)
905 909 repo.vfs.rmtree(tmppath, forcibly=True)
906 910
907 911 if backuppath:
908 912 ui.warn(_('copy of old repository backed up at %s\n') %
909 913 backuppath)
910 914 ui.warn(_('the old repository will not be deleted; remove '
911 915 'it to free up disk space once the upgraded '
912 916 'repository is verified\n'))
@@ -1,408 +1,408 b''
1 1 Show all commands except debug commands
2 2 $ hg debugcomplete
3 3 add
4 4 addremove
5 5 annotate
6 6 archive
7 7 backout
8 8 bisect
9 9 bookmarks
10 10 branch
11 11 branches
12 12 bundle
13 13 cat
14 14 clone
15 15 commit
16 16 config
17 17 copy
18 18 diff
19 19 export
20 20 files
21 21 forget
22 22 graft
23 23 grep
24 24 heads
25 25 help
26 26 identify
27 27 import
28 28 incoming
29 29 init
30 30 locate
31 31 log
32 32 manifest
33 33 merge
34 34 outgoing
35 35 parents
36 36 paths
37 37 phase
38 38 pull
39 39 push
40 40 recover
41 41 remove
42 42 rename
43 43 resolve
44 44 revert
45 45 rollback
46 46 root
47 47 serve
48 48 status
49 49 summary
50 50 tag
51 51 tags
52 52 tip
53 53 unbundle
54 54 update
55 55 verify
56 56 version
57 57
58 58 Show all commands that start with "a"
59 59 $ hg debugcomplete a
60 60 add
61 61 addremove
62 62 annotate
63 63 archive
64 64
65 65 Do not show debug commands if there are other candidates
66 66 $ hg debugcomplete d
67 67 diff
68 68
69 69 Show debug commands if there are no other candidates
70 70 $ hg debugcomplete debug
71 71 debugancestor
72 72 debugapplystreamclonebundle
73 73 debugbuilddag
74 74 debugbundle
75 75 debugcapabilities
76 76 debugcheckstate
77 77 debugcolor
78 78 debugcommands
79 79 debugcomplete
80 80 debugconfig
81 81 debugcreatestreamclonebundle
82 82 debugdag
83 83 debugdata
84 84 debugdate
85 85 debugdeltachain
86 86 debugdirstate
87 87 debugdiscovery
88 88 debugdownload
89 89 debugextensions
90 90 debugfileset
91 91 debugformat
92 92 debugfsinfo
93 93 debuggetbundle
94 94 debugignore
95 95 debugindex
96 96 debugindexdot
97 97 debugindexstats
98 98 debuginstall
99 99 debugknown
100 100 debuglabelcomplete
101 101 debuglocks
102 102 debugmanifestfulltextcache
103 103 debugmergestate
104 104 debugnamecomplete
105 105 debugobsolete
106 106 debugpathcomplete
107 107 debugpeer
108 108 debugpickmergetool
109 109 debugpushkey
110 110 debugpvec
111 111 debugrebuilddirstate
112 112 debugrebuildfncache
113 113 debugrename
114 114 debugrevlog
115 115 debugrevlogindex
116 116 debugrevspec
117 117 debugserve
118 118 debugsetparents
119 119 debugssl
120 120 debugsub
121 121 debugsuccessorssets
122 122 debugtemplate
123 123 debuguigetpass
124 124 debuguiprompt
125 125 debugupdatecaches
126 126 debugupgraderepo
127 127 debugwalk
128 128 debugwhyunstable
129 129 debugwireargs
130 130 debugwireproto
131 131
132 132 Do not show the alias of a debug command if there are other candidates
133 133 (this should hide rawcommit)
134 134 $ hg debugcomplete r
135 135 recover
136 136 remove
137 137 rename
138 138 resolve
139 139 revert
140 140 rollback
141 141 root
142 142 Show the alias of a debug command if there are no other candidates
143 143 $ hg debugcomplete rawc
144 144
145 145
146 146 Show the global options
147 147 $ hg debugcomplete --options | sort
148 148 --color
149 149 --config
150 150 --cwd
151 151 --debug
152 152 --debugger
153 153 --encoding
154 154 --encodingmode
155 155 --help
156 156 --hidden
157 157 --noninteractive
158 158 --pager
159 159 --profile
160 160 --quiet
161 161 --repository
162 162 --time
163 163 --traceback
164 164 --verbose
165 165 --version
166 166 -R
167 167 -h
168 168 -q
169 169 -v
170 170 -y
171 171
172 172 Show the options for the "serve" command
173 173 $ hg debugcomplete --options serve | sort
174 174 --accesslog
175 175 --address
176 176 --certificate
177 177 --cmdserver
178 178 --color
179 179 --config
180 180 --cwd
181 181 --daemon
182 182 --daemon-postexec
183 183 --debug
184 184 --debugger
185 185 --encoding
186 186 --encodingmode
187 187 --errorlog
188 188 --help
189 189 --hidden
190 190 --ipv6
191 191 --name
192 192 --noninteractive
193 193 --pager
194 194 --pid-file
195 195 --port
196 196 --prefix
197 197 --print-url
198 198 --profile
199 199 --quiet
200 200 --repository
201 201 --stdio
202 202 --style
203 203 --subrepos
204 204 --templates
205 205 --time
206 206 --traceback
207 207 --verbose
208 208 --version
209 209 --web-conf
210 210 -6
211 211 -A
212 212 -E
213 213 -R
214 214 -S
215 215 -a
216 216 -d
217 217 -h
218 218 -n
219 219 -p
220 220 -q
221 221 -t
222 222 -v
223 223 -y
224 224
225 225 Show an error if we use --options with an ambiguous abbreviation
226 226 $ hg debugcomplete --options s
227 227 hg: command 's' is ambiguous:
228 228 serve showconfig status summary
229 229 [255]
230 230
231 231 Show all commands + options
232 232 $ hg debugcommands
233 233 add: include, exclude, subrepos, dry-run
234 234 addremove: similarity, subrepos, include, exclude, dry-run
235 235 annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, skip, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, include, exclude, template
236 236 archive: no-decode, prefix, rev, type, subrepos, include, exclude
237 237 backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
238 238 bisect: reset, good, bad, skip, extend, command, noupdate
239 239 bookmarks: force, rev, delete, rename, inactive, list, template
240 240 branch: force, clean, rev
241 241 branches: active, closed, rev, template
242 242 bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
243 243 cat: output, rev, decode, include, exclude, template
244 244 clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
245 245 commit: addremove, close-branch, amend, secret, edit, interactive, include, exclude, message, logfile, date, user, subrepos
246 246 config: untrusted, edit, local, global, template
247 247 copy: after, force, include, exclude, dry-run
248 248 debugancestor:
249 249 debugapplystreamclonebundle:
250 250 debugbuilddag: mergeable-file, overwritten-file, new-file
251 251 debugbundle: all, part-type, spec
252 252 debugcapabilities:
253 253 debugcheckstate:
254 254 debugcolor: style
255 255 debugcommands:
256 256 debugcomplete: options
257 257 debugcreatestreamclonebundle:
258 258 debugdag: tags, branches, dots, spaces
259 259 debugdata: changelog, manifest, dir
260 260 debugdate: extended
261 261 debugdeltachain: changelog, manifest, dir, template
262 262 debugdirstate: nodates, dates, datesort
263 263 debugdiscovery: old, nonheads, rev, ssh, remotecmd, insecure
264 264 debugdownload: output
265 265 debugextensions: template
266 266 debugfileset: rev, all-files, show-matcher, show-stage
267 267 debugformat: template
268 268 debugfsinfo:
269 269 debuggetbundle: head, common, type
270 270 debugignore:
271 271 debugindex: changelog, manifest, dir, template
272 272 debugindexdot: changelog, manifest, dir
273 273 debugindexstats:
274 274 debuginstall: template
275 275 debugknown:
276 276 debuglabelcomplete:
277 277 debuglocks: force-lock, force-wlock, set-lock, set-wlock
278 278 debugmanifestfulltextcache: clear, add
279 279 debugmergestate:
280 280 debugnamecomplete:
281 281 debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
282 282 debugpathcomplete: full, normal, added, removed
283 283 debugpeer:
284 284 debugpickmergetool: rev, changedelete, include, exclude, tool
285 285 debugpushkey:
286 286 debugpvec:
287 287 debugrebuilddirstate: rev, minimal
288 288 debugrebuildfncache:
289 289 debugrename: rev
290 290 debugrevlog: changelog, manifest, dir, dump
291 291 debugrevlogindex: changelog, manifest, dir, format
292 292 debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
293 293 debugserve: sshstdio, logiofd, logiofile
294 294 debugsetparents:
295 295 debugssl:
296 296 debugsub: rev
297 297 debugsuccessorssets: closest
298 298 debugtemplate: rev, define
299 299 debuguigetpass: prompt
300 300 debuguiprompt: prompt
301 301 debugupdatecaches:
302 debugupgraderepo: optimize, run
302 debugupgraderepo: optimize, run, backup
303 303 debugwalk: include, exclude
304 304 debugwhyunstable:
305 305 debugwireargs: three, four, five, ssh, remotecmd, insecure
306 306 debugwireproto: localssh, peer, noreadstderr, nologhandshake, ssh, remotecmd, insecure
307 307 diff: rev, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos
308 308 export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template
309 309 files: rev, print0, include, exclude, template, subrepos
310 310 forget: interactive, include, exclude, dry-run
311 311 graft: rev, base, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run
312 312 grep: print0, all, diff, text, follow, ignore-case, files-with-matches, line-number, rev, all-files, user, date, template, include, exclude
313 313 heads: rev, topo, active, closed, style, template
314 314 help: extension, command, keyword, system
315 315 identify: rev, num, id, branch, tags, bookmarks, ssh, remotecmd, insecure, template
316 316 import: strip, base, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity
317 317 incoming: force, newest-first, bundle, rev, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
318 318 init: ssh, remotecmd, insecure
319 319 locate: rev, print0, fullpath, include, exclude
320 320 log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
321 321 manifest: rev, all, template
322 322 merge: force, rev, preview, abort, tool
323 323 outgoing: force, rev, newest-first, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
324 324 parents: rev, style, template
325 325 paths: template
326 326 phase: public, draft, secret, force, rev
327 327 pull: update, force, rev, bookmark, branch, ssh, remotecmd, insecure
328 328 push: force, rev, bookmark, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
329 329 recover:
330 330 remove: after, force, subrepos, include, exclude, dry-run
331 331 rename: after, force, include, exclude, dry-run
332 332 resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
333 333 revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
334 334 rollback: dry-run, force
335 335 root:
336 336 serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, print-url, subrepos
337 337 status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, terse, copies, print0, rev, change, include, exclude, subrepos, template
338 338 summary: remote
339 339 tag: force, local, rev, remove, edit, message, date, user
340 340 tags: template
341 341 tip: patch, git, style, template
342 342 unbundle: update
343 343 update: clean, check, merge, date, rev, tool
344 344 verify:
345 345 version: template
346 346
347 347 $ hg init a
348 348 $ cd a
349 349 $ echo fee > fee
350 350 $ hg ci -q -Amfee
351 351 $ hg tag fee
352 352 $ mkdir fie
353 353 $ echo dead > fie/dead
354 354 $ echo live > fie/live
355 355 $ hg bookmark fo
356 356 $ hg branch -q fie
357 357 $ hg ci -q -Amfie
358 358 $ echo fo > fo
359 359 $ hg branch -qf default
360 360 $ hg ci -q -Amfo
361 361 $ echo Fum > Fum
362 362 $ hg ci -q -AmFum
363 363 $ hg bookmark Fum
364 364
365 365 Test debugpathcomplete
366 366
367 367 $ hg debugpathcomplete f
368 368 fee
369 369 fie
370 370 fo
371 371 $ hg debugpathcomplete -f f
372 372 fee
373 373 fie/dead
374 374 fie/live
375 375 fo
376 376
377 377 $ hg rm Fum
378 378 $ hg debugpathcomplete -r F
379 379 Fum
380 380
381 381 Test debugnamecomplete
382 382
383 383 $ hg debugnamecomplete
384 384 Fum
385 385 default
386 386 fee
387 387 fie
388 388 fo
389 389 tip
390 390 $ hg debugnamecomplete f
391 391 fee
392 392 fie
393 393 fo
394 394
395 395 Test debuglabelcomplete, a deprecated name for debugnamecomplete that is still
396 396 used for completions in some shells.
397 397
398 398 $ hg debuglabelcomplete
399 399 Fum
400 400 default
401 401 fee
402 402 fie
403 403 fo
404 404 tip
405 405 $ hg debuglabelcomplete f
406 406 fee
407 407 fie
408 408 fo
@@ -1,802 +1,843 b''
1 1 #require no-reposimplestore
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [extensions]
5 5 > share =
6 6 > EOF
7 7
8 8 store and revlogv1 are required in source
9 9
10 10 $ hg --config format.usestore=false init no-store
11 11 $ hg -R no-store debugupgraderepo
12 12 abort: cannot upgrade repository; requirement missing: store
13 13 [255]
14 14
15 15 $ hg init no-revlogv1
16 16 $ cat > no-revlogv1/.hg/requires << EOF
17 17 > dotencode
18 18 > fncache
19 19 > generaldelta
20 20 > store
21 21 > EOF
22 22
23 23 $ hg -R no-revlogv1 debugupgraderepo
24 24 abort: cannot upgrade repository; requirement missing: revlogv1
25 25 [255]
26 26
27 27 Cannot upgrade shared repositories
28 28
29 29 $ hg init share-parent
30 30 $ hg -q share share-parent share-child
31 31
32 32 $ hg -R share-child debugupgraderepo
33 33 abort: cannot upgrade repository; unsupported source requirement: shared
34 34 [255]
35 35
36 36 Do not yet support upgrading treemanifest repos
37 37
38 38 $ hg --config experimental.treemanifest=true init treemanifest
39 39 $ hg -R treemanifest debugupgraderepo
40 40 abort: cannot upgrade repository; unsupported source requirement: treemanifest
41 41 [255]
42 42
43 43 Cannot add treemanifest requirement during upgrade
44 44
45 45 $ hg init disallowaddedreq
46 46 $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
47 47 abort: cannot upgrade repository; do not support adding requirement: treemanifest
48 48 [255]
49 49
50 50 An upgrade of a repository created with recommended settings only suggests optimizations
51 51
52 52 $ hg init empty
53 53 $ cd empty
54 54 $ hg debugformat
55 55 format-variant repo
56 56 fncache: yes
57 57 dotencode: yes
58 58 generaldelta: yes
59 59 sparserevlog: yes
60 60 plain-cl-delta: yes
61 61 compression: zlib
62 62 $ hg debugformat --verbose
63 63 format-variant repo config default
64 64 fncache: yes yes yes
65 65 dotencode: yes yes yes
66 66 generaldelta: yes yes yes
67 67 sparserevlog: yes yes yes
68 68 plain-cl-delta: yes yes yes
69 69 compression: zlib zlib zlib
70 70 $ hg debugformat --verbose --config format.usefncache=no
71 71 format-variant repo config default
72 72 fncache: yes no yes
73 73 dotencode: yes no yes
74 74 generaldelta: yes yes yes
75 75 sparserevlog: yes yes yes
76 76 plain-cl-delta: yes yes yes
77 77 compression: zlib zlib zlib
78 78 $ hg debugformat --verbose --config format.usefncache=no --color=debug
79 79 format-variant repo config default
80 80 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
81 81 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
82 82 [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
83 83 [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
84 84 [formatvariant.name.uptodate|plain-cl-delta:][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
85 85 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
86 86 $ hg debugformat -Tjson
87 87 [
88 88 {
89 89 "config": true,
90 90 "default": true,
91 91 "name": "fncache",
92 92 "repo": true
93 93 },
94 94 {
95 95 "config": true,
96 96 "default": true,
97 97 "name": "dotencode",
98 98 "repo": true
99 99 },
100 100 {
101 101 "config": true,
102 102 "default": true,
103 103 "name": "generaldelta",
104 104 "repo": true
105 105 },
106 106 {
107 107 "config": true,
108 108 "default": true,
109 109 "name": "sparserevlog",
110 110 "repo": true
111 111 },
112 112 {
113 113 "config": true,
114 114 "default": true,
115 115 "name": "plain-cl-delta",
116 116 "repo": true
117 117 },
118 118 {
119 119 "config": "zlib",
120 120 "default": "zlib",
121 121 "name": "compression",
122 122 "repo": "zlib"
123 123 }
124 124 ]
125 125 $ hg debugupgraderepo
126 126 (no feature deficiencies found in existing repository)
127 127 performing an upgrade with "--run" will make the following changes:
128 128
129 129 requirements
130 130 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
131 131
132 132 additional optimizations are available by specifying "--optimize <name>":
133 133
134 134 re-delta-parent
135 135 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
136 136
137 137 re-delta-multibase
138 138 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
139 139
140 140 re-delta-all
141 141 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
142 142
143 143 re-delta-fulladd
144 144 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
145 145
146 146
147 147 --optimize can be used to add optimizations
148 148
149 149 $ hg debugupgrade --optimize redeltaparent
150 150 (no feature deficiencies found in existing repository)
151 151 performing an upgrade with "--run" will make the following changes:
152 152
153 153 requirements
154 154 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
155 155
156 156 re-delta-parent
157 157 deltas within internal storage will choose a new base revision if needed
158 158
159 159 additional optimizations are available by specifying "--optimize <name>":
160 160
161 161 re-delta-multibase
162 162 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
163 163
164 164 re-delta-all
165 165 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
166 166
167 167 re-delta-fulladd
168 168 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
169 169
170 170
171 171 modern form of the option
172 172
173 173 $ hg debugupgrade --optimize re-delta-parent
174 174 (no feature deficiencies found in existing repository)
175 175 performing an upgrade with "--run" will make the following changes:
176 176
177 177 requirements
178 178 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
179 179
180 180 re-delta-parent
181 181 deltas within internal storage will choose a new base revision if needed
182 182
183 183 additional optimizations are available by specifying "--optimize <name>":
184 184
185 185 re-delta-multibase
186 186 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
187 187
188 188 re-delta-all
189 189 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
190 190
191 191 re-delta-fulladd
192 192 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
193 193
194 194
195 195 unknown optimization:
196 196
197 197 $ hg debugupgrade --optimize foobar
198 198 abort: unknown optimization action requested: foobar
199 199 (run without arguments to see valid optimizations)
200 200 [255]
201 201
202 202 Various sub-optimal detections work
203 203
204 204 $ cat > .hg/requires << EOF
205 205 > revlogv1
206 206 > store
207 207 > EOF
208 208
209 209 $ hg debugformat
210 210 format-variant repo
211 211 fncache: no
212 212 dotencode: no
213 213 generaldelta: no
214 214 sparserevlog: no
215 215 plain-cl-delta: yes
216 216 compression: zlib
217 217 $ hg debugformat --verbose
218 218 format-variant repo config default
219 219 fncache: no yes yes
220 220 dotencode: no yes yes
221 221 generaldelta: no yes yes
222 222 sparserevlog: no yes yes
223 223 plain-cl-delta: yes yes yes
224 224 compression: zlib zlib zlib
225 225 $ hg debugformat --verbose --config format.usegeneraldelta=no
226 226 format-variant repo config default
227 227 fncache: no yes yes
228 228 dotencode: no yes yes
229 229 generaldelta: no no yes
230 230 sparserevlog: no no yes
231 231 plain-cl-delta: yes yes yes
232 232 compression: zlib zlib zlib
233 233 $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
234 234 format-variant repo config default
235 235 [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
236 236 [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
237 237 [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
238 238 [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
239 239 [formatvariant.name.uptodate|plain-cl-delta:][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
240 240 [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
241 241 $ hg debugupgraderepo
242 242 repository lacks features recommended by current config options:
243 243
244 244 fncache
245 245 long and reserved filenames may not work correctly; repository performance is sub-optimal
246 246
247 247 dotencode
248 248 storage of filenames beginning with a period or space may not work correctly
249 249
250 250 generaldelta
251 251 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
252 252
253 253 sparserevlog
254 254 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
255 255
256 256
257 257 performing an upgrade with "--run" will make the following changes:
258 258
259 259 requirements
260 260 preserved: revlogv1, store
261 261 added: dotencode, fncache, generaldelta, sparserevlog
262 262
263 263 fncache
264 264 repository will be more resilient to storing certain paths and performance of certain operations should be improved
265 265
266 266 dotencode
267 267 repository will be better able to store files beginning with a space or period
268 268
269 269 generaldelta
270 270 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
271 271
272 272 sparserevlog
273 273 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
274 274
275 275 additional optimizations are available by specifying "--optimize <name>":
276 276
277 277 re-delta-parent
278 278 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
279 279
280 280 re-delta-multibase
281 281 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
282 282
283 283 re-delta-all
284 284 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
285 285
286 286 re-delta-fulladd
287 287 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
288 288
289 289
290 290 $ hg --config format.dotencode=false debugupgraderepo
291 291 repository lacks features recommended by current config options:
292 292
293 293 fncache
294 294 long and reserved filenames may not work correctly; repository performance is sub-optimal
295 295
296 296 generaldelta
297 297 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
298 298
299 299 sparserevlog
300 300 in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
301 301
302 302 repository lacks features used by the default config options:
303 303
304 304 dotencode
305 305 storage of filenames beginning with a period or space may not work correctly
306 306
307 307
308 308 performing an upgrade with "--run" will make the following changes:
309 309
310 310 requirements
311 311 preserved: revlogv1, store
312 312 added: fncache, generaldelta, sparserevlog
313 313
314 314 fncache
315 315 repository will be more resilient to storing certain paths and performance of certain operations should be improved
316 316
317 317 generaldelta
318 318 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
319 319
320 320 sparserevlog
321 321 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
322 322
323 323 additional optimizations are available by specifying "--optimize <name>":
324 324
325 325 re-delta-parent
326 326 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
327 327
328 328 re-delta-multibase
329 329 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
330 330
331 331 re-delta-all
332 332 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
333 333
334 334 re-delta-fulladd
335 335 every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
336 336
337 337
338 338 $ cd ..
339 339
340 340 Upgrading a repository that is already modern essentially no-ops
341 341
342 342 $ hg init modern
343 343 $ hg -R modern debugupgraderepo --run
344 344 upgrade will perform the following actions:
345 345
346 346 requirements
347 347 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
348 348
349 349 beginning upgrade...
350 350 repository locked and read-only
351 351 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
352 352 (it is safe to interrupt this process any time before data migration completes)
353 353 data fully migrated to temporary repository
354 354 marking source repository as being upgraded; clients will be unable to read from repository
355 355 starting in-place swap of repository data
356 356 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
357 357 replacing store...
358 358 store replacement complete; repository was inconsistent for *s (glob)
359 359 finalizing requirements file and making repository readable again
360 360 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
361 361 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
362 362 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
363 363
364 364 Upgrading a repository to generaldelta works
365 365
366 366 $ hg --config format.usegeneraldelta=false init upgradegd
367 367 $ cd upgradegd
368 368 $ touch f0
369 369 $ hg -q commit -A -m initial
370 370 $ touch f1
371 371 $ hg -q commit -A -m 'add f1'
372 372 $ hg -q up -r 0
373 373 $ touch f2
374 374 $ hg -q commit -A -m 'add f2'
375 375
376 376 $ hg debugupgraderepo --run --config format.sparse-revlog=false
377 377 upgrade will perform the following actions:
378 378
379 379 requirements
380 380 preserved: dotencode, fncache, revlogv1, store
381 381 added: generaldelta
382 382
383 383 generaldelta
384 384 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
385 385
386 386 beginning upgrade...
387 387 repository locked and read-only
388 388 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
389 389 (it is safe to interrupt this process any time before data migration completes)
390 390 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
391 391 migrating 917 bytes in store; 401 bytes tracked data
392 392 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
393 393 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
394 394 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
395 395 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
396 396 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
397 397 finished migrating 3 changelog revisions; change in size: 0 bytes
398 398 finished migrating 9 total revisions; total change in store size: 0 bytes
399 399 copying phaseroots
400 400 data fully migrated to temporary repository
401 401 marking source repository as being upgraded; clients will be unable to read from repository
402 402 starting in-place swap of repository data
403 403 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
404 404 replacing store...
405 405 store replacement complete; repository was inconsistent for *s (glob)
406 406 finalizing requirements file and making repository readable again
407 407 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
408 408 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
409 409 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
410 410
411 411 Original requirements backed up
412 412
413 413 $ cat .hg/upgradebackup.*/requires
414 414 dotencode
415 415 fncache
416 416 revlogv1
417 417 store
418 418
419 419 generaldelta added to original requirements file
420 420
421 421 $ cat .hg/requires
422 422 dotencode
423 423 fncache
424 424 generaldelta
425 425 revlogv1
426 426 store
427 427
428 428 store directory has files we expect
429 429
430 430 $ ls .hg/store
431 431 00changelog.i
432 432 00manifest.i
433 433 data
434 434 fncache
435 435 phaseroots
436 436 undo
437 437 undo.backupfiles
438 438 undo.phaseroots
439 439
440 440 manifest should be generaldelta
441 441
442 442 $ hg debugrevlog -m | grep flags
443 443 flags : inline, generaldelta
444 444
445 445 verify should be happy
446 446
447 447 $ hg verify
448 448 checking changesets
449 449 checking manifests
450 450 crosschecking files in changesets and manifests
451 451 checking files
452 452 checked 3 changesets with 3 changes to 3 files
453 453
454 454 old store should be backed up
455 455
456 $ ls -d .hg/upgradebackup.*/
457 .hg/upgradebackup.*/ (glob)
456 458 $ ls .hg/upgradebackup.*/store
457 459 00changelog.i
458 460 00manifest.i
459 461 data
460 462 fncache
461 463 phaseroots
462 464 undo
463 465 undo.backup.fncache
464 466 undo.backupfiles
465 467 undo.phaseroots
466 468
469 unless --no-backup is passed
470
471 $ rm -rf .hg/upgradebackup.*/
472 $ hg debugupgraderepo --run --no-backup
473 upgrade will perform the following actions:
474
475 requirements
476 preserved: dotencode, fncache, generaldelta, revlogv1, store
477 added: sparserevlog
478
479 sparserevlog
480 Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
481
482 beginning upgrade...
483 repository locked and read-only
484 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
485 (it is safe to interrupt this process any time before data migration completes)
486 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
487 migrating 917 bytes in store; 401 bytes tracked data
488 migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
489 finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
490 migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
491 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
492 migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
493 finished migrating 3 changelog revisions; change in size: 0 bytes
494 finished migrating 9 total revisions; total change in store size: 0 bytes
495 copying phaseroots
496 data fully migrated to temporary repository
497 marking source repository as being upgraded; clients will be unable to read from repository
498 starting in-place swap of repository data
499 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
500 replacing store...
501 store replacement complete; repository was inconsistent for 0.0s
502 finalizing requirements file and making repository readable again
503 removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
504 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
505 $ ls -1 .hg/ | grep upgradebackup
506 [1]
467 507 $ cd ..
468 508
509
469 510 store files with special filenames aren't encoded during copy
470 511
471 512 $ hg init store-filenames
472 513 $ cd store-filenames
473 514 $ touch foo
474 515 $ hg -q commit -A -m initial
475 516 $ touch .hg/store/.XX_special_filename
476 517
477 518 $ hg debugupgraderepo --run
478 519 upgrade will perform the following actions:
479 520
480 521 requirements
481 522 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
482 523
483 524 beginning upgrade...
484 525 repository locked and read-only
485 526 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
486 527 (it is safe to interrupt this process any time before data migration completes)
487 528 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
488 529 migrating 301 bytes in store; 107 bytes tracked data
489 530 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
490 531 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
491 532 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
492 533 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
493 534 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
494 535 finished migrating 1 changelog revisions; change in size: 0 bytes
495 536 finished migrating 3 total revisions; total change in store size: 0 bytes
496 537 copying .XX_special_filename
497 538 copying phaseroots
498 539 data fully migrated to temporary repository
499 540 marking source repository as being upgraded; clients will be unable to read from repository
500 541 starting in-place swap of repository data
501 542 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
502 543 replacing store...
503 544 store replacement complete; repository was inconsistent for *s (glob)
504 545 finalizing requirements file and making repository readable again
505 546 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
506 547 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
507 548 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
508 549 $ hg debugupgraderepo --run --optimize redeltafulladd
509 550 upgrade will perform the following actions:
510 551
511 552 requirements
512 553 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
513 554
514 555 re-delta-fulladd
515 556 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
516 557
517 558 beginning upgrade...
518 559 repository locked and read-only
519 560 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
520 561 (it is safe to interrupt this process any time before data migration completes)
521 562 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
522 563 migrating 301 bytes in store; 107 bytes tracked data
523 564 migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
524 565 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
525 566 migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
526 567 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
527 568 migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
528 569 finished migrating 1 changelog revisions; change in size: 0 bytes
529 570 finished migrating 3 total revisions; total change in store size: 0 bytes
530 571 copying .XX_special_filename
531 572 copying phaseroots
532 573 data fully migrated to temporary repository
533 574 marking source repository as being upgraded; clients will be unable to read from repository
534 575 starting in-place swap of repository data
535 576 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
536 577 replacing store...
537 578 store replacement complete; repository was inconsistent for *s (glob)
538 579 finalizing requirements file and making repository readable again
539 580 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
540 581 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
541 582 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
542 583
543 584 fncache is valid after upgrade
544 585
545 586 $ hg debugrebuildfncache
546 587 fncache already up to date
547 588
548 589 $ cd ..
549 590
550 591 Check upgrading a large file repository
551 592 ---------------------------------------
552 593
553 594 $ hg init largefilesrepo
554 595 $ cat << EOF >> largefilesrepo/.hg/hgrc
555 596 > [extensions]
556 597 > largefiles =
557 598 > EOF
558 599
559 600 $ cd largefilesrepo
560 601 $ touch foo
561 602 $ hg add --large foo
562 603 $ hg -q commit -m initial
563 604 $ cat .hg/requires
564 605 dotencode
565 606 fncache
566 607 generaldelta
567 608 largefiles
568 609 revlogv1
569 610 sparserevlog
570 611 store
571 612
572 613 $ hg debugupgraderepo --run
573 614 upgrade will perform the following actions:
574 615
575 616 requirements
576 617 preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
577 618
578 619 beginning upgrade...
579 620 repository locked and read-only
580 621 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
581 622 (it is safe to interrupt this process any time before data migration completes)
582 623 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
583 624 migrating 355 bytes in store; 160 bytes tracked data
584 625 migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
585 626 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
586 627 migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
587 628 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
588 629 migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
589 630 finished migrating 1 changelog revisions; change in size: 0 bytes
590 631 finished migrating 3 total revisions; total change in store size: 0 bytes
591 632 copying phaseroots
592 633 data fully migrated to temporary repository
593 634 marking source repository as being upgraded; clients will be unable to read from repository
594 635 starting in-place swap of repository data
595 636 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
596 637 replacing store...
597 638 store replacement complete; repository was inconsistent for *s (glob)
598 639 finalizing requirements file and making repository readable again
599 640 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
600 641 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
601 642 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
602 643 $ cat .hg/requires
603 644 dotencode
604 645 fncache
605 646 generaldelta
606 647 largefiles
607 648 revlogv1
608 649 sparserevlog
609 650 store
610 651
611 652 $ cat << EOF >> .hg/hgrc
612 653 > [extensions]
613 654 > lfs =
614 655 > [lfs]
615 656 > threshold = 10
616 657 > EOF
617 658 $ echo '123456789012345' > lfs.bin
618 659 $ hg ci -Am 'lfs.bin'
619 660 adding lfs.bin
620 661 $ grep lfs .hg/requires
621 662 lfs
622 663 $ find .hg/store/lfs -type f
623 664 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
624 665
625 666 $ hg debugupgraderepo --run
626 667 upgrade will perform the following actions:
627 668
628 669 requirements
629 670 preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
630 671
631 672 beginning upgrade...
632 673 repository locked and read-only
633 674 creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
634 675 (it is safe to interrupt this process any time before data migration completes)
635 676 migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
636 677 migrating 801 bytes in store; 467 bytes tracked data
637 678 migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
638 679 finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
639 680 migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
640 681 finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
641 682 migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
642 683 finished migrating 2 changelog revisions; change in size: 0 bytes
643 684 finished migrating 6 total revisions; total change in store size: 0 bytes
644 685 copying phaseroots
645 686 copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
646 687 data fully migrated to temporary repository
647 688 marking source repository as being upgraded; clients will be unable to read from repository
648 689 starting in-place swap of repository data
649 690 replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
650 691 replacing store...
651 692 store replacement complete; repository was inconsistent for *s (glob)
652 693 finalizing requirements file and making repository readable again
653 694 removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
654 695 copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
655 696 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
656 697
657 698 $ grep lfs .hg/requires
658 699 lfs
659 700 $ find .hg/store/lfs -type f
660 701 .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
661 702 $ hg verify
662 703 checking changesets
663 704 checking manifests
664 705 crosschecking files in changesets and manifests
665 706 checking files
666 707 checked 2 changesets with 2 changes to 2 files
667 708 $ hg debugdata lfs.bin 0
668 709 version https://git-lfs.github.com/spec/v1
669 710 oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
670 711 size 16
671 712 x-is-binary 0
672 713
673 714 $ cd ..
674 715
676 717 repository config is taken into account
676 717 -------------------------------------
677 718
678 719 $ cat << EOF >> $HGRCPATH
679 720 > [format]
680 721 > maxchainlen = 1
681 722 > EOF
682 723
683 724 $ hg init localconfig
684 725 $ cd localconfig
685 726 $ cat << EOF > file
686 727 > some content
687 728 > with some length
688 729 > to make sure we get a delta
689 730 > after changes
690 731 > very long
691 732 > very long
692 733 > very long
693 734 > very long
694 735 > very long
695 736 > very long
696 737 > very long
697 738 > very long
698 739 > very long
699 740 > very long
700 741 > very long
701 742 > EOF
702 743 $ hg -q commit -A -m A
703 744 $ echo "new line" >> file
704 745 $ hg -q commit -m B
705 746 $ echo "new line" >> file
706 747 $ hg -q commit -m C
707 748
708 749 $ cat << EOF >> .hg/hgrc
709 750 > [format]
710 751 > maxchainlen = 9001
711 752 > EOF
712 753 $ hg config format
713 754 format.maxchainlen=9001
714 755 $ hg debugdeltachain file
715 756 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
716 757 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
717 758 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
718 759 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
719 760
720 761 $ hg debugupgraderepo --run --optimize redeltaall
721 762 upgrade will perform the following actions:
722 763
723 764 requirements
724 765 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
725 766
726 767 re-delta-all
727 768 deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
728 769
729 770 beginning upgrade...
730 771 repository locked and read-only
731 772 creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
732 773 (it is safe to interrupt this process any time before data migration completes)
733 774 migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
734 775 migrating 1019 bytes in store; 882 bytes tracked data
735 776 migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
736 777 finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
737 778 migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
738 779 finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
739 780 migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
740 781 finished migrating 3 changelog revisions; change in size: 0 bytes
741 782 finished migrating 9 total revisions; total change in store size: -9 bytes
742 783 copying phaseroots
743 784 data fully migrated to temporary repository
744 785 marking source repository as being upgraded; clients will be unable to read from repository
745 786 starting in-place swap of repository data
746 787 replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
747 788 replacing store...
748 789 store replacement complete; repository was inconsistent for *s (glob)
749 790 finalizing requirements file and making repository readable again
750 791 removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
751 792 copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
752 793 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
753 794 $ hg debugdeltachain file
754 795 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
755 796 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
756 797 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
757 798 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
758 799 $ cd ..
759 800
760 801 $ cat << EOF >> $HGRCPATH
761 802 > [format]
762 803 > maxchainlen = 9001
763 804 > EOF
764 805
765 806 Check upgrading a sparse-revlog repository
766 807 ---------------------------------------
767 808
768 809 $ hg init sparserevlogrepo --config format.sparse-revlog=no
769 810 $ cd sparserevlogrepo
770 811 $ touch foo
771 812 $ hg add foo
772 813 $ hg -q commit -m "foo"
773 814 $ cat .hg/requires
774 815 dotencode
775 816 fncache
776 817 generaldelta
777 818 revlogv1
778 819 store
779 820
780 821 Check that we can add the sparse-revlog format requirement
781 822 $ hg --config format.sparse-revlog=yes debugupgraderepo --run >/dev/null
782 823 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
783 824 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
784 825 $ cat .hg/requires
785 826 dotencode
786 827 fncache
787 828 generaldelta
788 829 revlogv1
789 830 sparserevlog
790 831 store
791 832
792 833 Check that we can remove the sparse-revlog format requirement
793 834 $ hg --config format.sparse-revlog=no debugupgraderepo --run >/dev/null
794 835 copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
795 836 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
796 837 $ cat .hg/requires
797 838 dotencode
798 839 fncache
799 840 generaldelta
800 841 revlogv1
801 842 store
802 843 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now