nodemap: track the maximum revision tracked in the nodemap...
marmoute - r44807:e41a164d default
@@ -1,4364 +1,4365 b''
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import codecs
11 11 import collections
12 12 import difflib
13 13 import errno
14 14 import operator
15 15 import os
16 16 import platform
17 17 import random
18 18 import re
19 19 import socket
20 20 import ssl
21 21 import stat
22 22 import string
23 23 import subprocess
24 24 import sys
25 25 import time
26 26
27 27 from .i18n import _
28 28 from .node import (
29 29 bin,
30 30 hex,
31 31 nullhex,
32 32 nullid,
33 33 nullrev,
34 34 short,
35 35 )
36 36 from .pycompat import (
37 37 getattr,
38 38 open,
39 39 )
40 40 from . import (
41 41 bundle2,
42 42 changegroup,
43 43 cmdutil,
44 44 color,
45 45 context,
46 46 copies,
47 47 dagparser,
48 48 encoding,
49 49 error,
50 50 exchange,
51 51 extensions,
52 52 filemerge,
53 53 filesetlang,
54 54 formatter,
55 55 hg,
56 56 httppeer,
57 57 localrepo,
58 58 lock as lockmod,
59 59 logcmdutil,
60 60 merge as mergemod,
61 61 obsolete,
62 62 obsutil,
63 63 pathutil,
64 64 phases,
65 65 policy,
66 66 pvec,
67 67 pycompat,
68 68 registrar,
69 69 repair,
70 70 revlog,
71 71 revset,
72 72 revsetlang,
73 73 scmutil,
74 74 setdiscovery,
75 75 simplemerge,
76 76 sshpeer,
77 77 sslutil,
78 78 streamclone,
79 79 tags as tagsmod,
80 80 templater,
81 81 treediscovery,
82 82 upgrade,
83 83 url as urlmod,
84 84 util,
85 85 vfs as vfsmod,
86 86 wireprotoframing,
87 87 wireprotoserver,
88 88 wireprotov2peer,
89 89 )
90 90 from .utils import (
91 91 cborutil,
92 92 compression,
93 93 dateutil,
94 94 procutil,
95 95 stringutil,
96 96 )
97 97
98 98 from .revlogutils import (
99 99 deltas as deltautil,
100 100 nodemap,
101 101 )
102 102
103 103 release = lockmod.release
104 104
105 105 command = registrar.command()
106 106
107 107
108 108 @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
109 109 def debugancestor(ui, repo, *args):
110 110 """find the ancestor revision of two revisions in a given index"""
111 111 if len(args) == 3:
112 112 index, rev1, rev2 = args
113 113 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
114 114 lookup = r.lookup
115 115 elif len(args) == 2:
116 116 if not repo:
117 117 raise error.Abort(
118 118 _(b'there is no Mercurial repository here (.hg not found)')
119 119 )
120 120 rev1, rev2 = args
121 121 r = repo.changelog
122 122 lookup = repo.lookup
123 123 else:
124 124 raise error.Abort(_(b'either two or three arguments required'))
125 125 a = r.ancestor(lookup(rev1), lookup(rev2))
126 126 ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
127 127
128 128
129 129 @command(b'debugapplystreamclonebundle', [], b'FILE')
130 130 def debugapplystreamclonebundle(ui, repo, fname):
131 131 """apply a stream clone bundle file"""
132 132 f = hg.openpath(ui, fname)
133 133 gen = exchange.readbundle(ui, f, fname)
134 134 gen.apply(repo)
135 135
136 136
137 137 @command(
138 138 b'debugbuilddag',
139 139 [
140 140 (
141 141 b'm',
142 142 b'mergeable-file',
143 143 None,
144 144 _(b'add single file mergeable changes'),
145 145 ),
146 146 (
147 147 b'o',
148 148 b'overwritten-file',
149 149 None,
150 150 _(b'add single file all revs overwrite'),
151 151 ),
152 152 (b'n', b'new-file', None, _(b'add new file at each rev')),
153 153 ],
154 154 _(b'[OPTION]... [TEXT]'),
155 155 )
156 156 def debugbuilddag(
157 157 ui,
158 158 repo,
159 159 text=None,
160 160 mergeable_file=False,
161 161 overwritten_file=False,
162 162 new_file=False,
163 163 ):
164 164 """builds a repo with a given DAG from scratch in the current empty repo
165 165
166 166 The description of the DAG is read from stdin if not given on the
167 167 command line.
168 168
169 169 Elements:
170 170
171 171 - "+n" is a linear run of n nodes based on the current default parent
172 172 - "." is a single node based on the current default parent
173 173 - "$" resets the default parent to null (implied at the start);
174 174 otherwise the default parent is always the last node created
175 175 - "<p" sets the default parent to the backref p
176 176 - "*p" is a fork at parent p, which is a backref
177 177 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
178 178 - "/p2" is a merge of the preceding node and p2
179 179 - ":tag" defines a local tag for the preceding node
180 180 - "@branch" sets the named branch for subsequent nodes
181 181 - "#...\\n" is a comment up to the end of the line
182 182
183 183 Whitespace between the above elements is ignored.
184 184
185 185 A backref is either
186 186
187 187 - a number n, which references the node curr-n, where curr is the current
188 188 node, or
189 189 - the name of a local tag you placed earlier using ":tag", or
190 190 - empty to denote the default parent.
191 191
192 192     All string-valued elements are either strictly alphanumeric, or must
193 193 be enclosed in double quotes ("..."), with "\\" as escape character.
194 194 """
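    # Illustrative example (not part of this changeset): assuming the syntax
    # described in the docstring above, a DAG text such as
    #
    #   hg debugbuilddag '+3:a $ +2:b *a /b'
    #
    # would create three linear nodes (tagging the last as "a"), reset the
    # default parent, create two more nodes as a new root (tagging the last
    # as "b"), fork from "a", and finally merge that fork with "b".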
195 195
196 196 if text is None:
197 197 ui.status(_(b"reading DAG from stdin\n"))
198 198 text = ui.fin.read()
199 199
200 200 cl = repo.changelog
201 201 if len(cl) > 0:
202 202 raise error.Abort(_(b'repository is not empty'))
203 203
204 204 # determine number of revs in DAG
205 205 total = 0
206 206 for type, data in dagparser.parsedag(text):
207 207 if type == b'n':
208 208 total += 1
209 209
210 210 if mergeable_file:
211 211 linesperrev = 2
212 212 # make a file with k lines per rev
213 213 initialmergedlines = [
214 214 b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
215 215 ]
216 216 initialmergedlines.append(b"")
217 217
218 218 tags = []
219 219 progress = ui.makeprogress(
220 220 _(b'building'), unit=_(b'revisions'), total=total
221 221 )
222 222 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
223 223 at = -1
224 224 atbranch = b'default'
225 225 nodeids = []
226 226 id = 0
227 227 progress.update(id)
228 228 for type, data in dagparser.parsedag(text):
229 229 if type == b'n':
230 230 ui.note((b'node %s\n' % pycompat.bytestr(data)))
231 231 id, ps = data
232 232
233 233 files = []
234 234 filecontent = {}
235 235
236 236 p2 = None
237 237 if mergeable_file:
238 238 fn = b"mf"
239 239 p1 = repo[ps[0]]
240 240 if len(ps) > 1:
241 241 p2 = repo[ps[1]]
242 242 pa = p1.ancestor(p2)
243 243 base, local, other = [
244 244 x[fn].data() for x in (pa, p1, p2)
245 245 ]
246 246 m3 = simplemerge.Merge3Text(base, local, other)
247 247 ml = [l.strip() for l in m3.merge_lines()]
248 248 ml.append(b"")
249 249 elif at > 0:
250 250 ml = p1[fn].data().split(b"\n")
251 251 else:
252 252 ml = initialmergedlines
253 253 ml[id * linesperrev] += b" r%i" % id
254 254 mergedtext = b"\n".join(ml)
255 255 files.append(fn)
256 256 filecontent[fn] = mergedtext
257 257
258 258 if overwritten_file:
259 259 fn = b"of"
260 260 files.append(fn)
261 261 filecontent[fn] = b"r%i\n" % id
262 262
263 263 if new_file:
264 264 fn = b"nf%i" % id
265 265 files.append(fn)
266 266 filecontent[fn] = b"r%i\n" % id
267 267 if len(ps) > 1:
268 268 if not p2:
269 269 p2 = repo[ps[1]]
270 270 for fn in p2:
271 271 if fn.startswith(b"nf"):
272 272 files.append(fn)
273 273 filecontent[fn] = p2[fn].data()
274 274
275 275 def fctxfn(repo, cx, path):
276 276 if path in filecontent:
277 277 return context.memfilectx(
278 278 repo, cx, path, filecontent[path]
279 279 )
280 280 return None
281 281
282 282 if len(ps) == 0 or ps[0] < 0:
283 283 pars = [None, None]
284 284 elif len(ps) == 1:
285 285 pars = [nodeids[ps[0]], None]
286 286 else:
287 287 pars = [nodeids[p] for p in ps]
288 288 cx = context.memctx(
289 289 repo,
290 290 pars,
291 291 b"r%i" % id,
292 292 files,
293 293 fctxfn,
294 294 date=(id, 0),
295 295 user=b"debugbuilddag",
296 296 extra={b'branch': atbranch},
297 297 )
298 298 nodeid = repo.commitctx(cx)
299 299 nodeids.append(nodeid)
300 300 at = id
301 301 elif type == b'l':
302 302 id, name = data
303 303 ui.note((b'tag %s\n' % name))
304 304 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
305 305 elif type == b'a':
306 306 ui.note((b'branch %s\n' % data))
307 307 atbranch = data
308 308 progress.update(id)
309 309
310 310 if tags:
311 311 repo.vfs.write(b"localtags", b"".join(tags))
312 312
313 313
314 314 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
315 315 indent_string = b' ' * indent
316 316 if all:
317 317 ui.writenoi18n(
318 318 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
319 319 % indent_string
320 320 )
321 321
322 322 def showchunks(named):
323 323 ui.write(b"\n%s%s\n" % (indent_string, named))
324 324 for deltadata in gen.deltaiter():
325 325 node, p1, p2, cs, deltabase, delta, flags = deltadata
326 326 ui.write(
327 327 b"%s%s %s %s %s %s %d\n"
328 328 % (
329 329 indent_string,
330 330 hex(node),
331 331 hex(p1),
332 332 hex(p2),
333 333 hex(cs),
334 334 hex(deltabase),
335 335 len(delta),
336 336 )
337 337 )
338 338
339 339 gen.changelogheader()
340 340 showchunks(b"changelog")
341 341 gen.manifestheader()
342 342 showchunks(b"manifest")
343 343 for chunkdata in iter(gen.filelogheader, {}):
344 344 fname = chunkdata[b'filename']
345 345 showchunks(fname)
346 346 else:
347 347 if isinstance(gen, bundle2.unbundle20):
348 348 raise error.Abort(_(b'use debugbundle2 for this file'))
349 349 gen.changelogheader()
350 350 for deltadata in gen.deltaiter():
351 351 node, p1, p2, cs, deltabase, delta, flags = deltadata
352 352 ui.write(b"%s%s\n" % (indent_string, hex(node)))
353 353
354 354
355 355 def _debugobsmarkers(ui, part, indent=0, **opts):
356 356 """display version and markers contained in 'data'"""
357 357 opts = pycompat.byteskwargs(opts)
358 358 data = part.read()
359 359 indent_string = b' ' * indent
360 360 try:
361 361 version, markers = obsolete._readmarkers(data)
362 362 except error.UnknownVersion as exc:
363 363 msg = b"%sunsupported version: %s (%d bytes)\n"
364 364 msg %= indent_string, exc.version, len(data)
365 365 ui.write(msg)
366 366 else:
367 367 msg = b"%sversion: %d (%d bytes)\n"
368 368 msg %= indent_string, version, len(data)
369 369 ui.write(msg)
370 370 fm = ui.formatter(b'debugobsolete', opts)
371 371 for rawmarker in sorted(markers):
372 372 m = obsutil.marker(None, rawmarker)
373 373 fm.startitem()
374 374 fm.plain(indent_string)
375 375 cmdutil.showmarker(fm, m)
376 376 fm.end()
377 377
378 378
379 379 def _debugphaseheads(ui, data, indent=0):
380 380     """display the phase heads contained in 'data'"""
381 381 indent_string = b' ' * indent
382 382 headsbyphase = phases.binarydecode(data)
383 383 for phase in phases.allphases:
384 384 for head in headsbyphase[phase]:
385 385 ui.write(indent_string)
386 386 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
387 387
388 388
389 389 def _quasirepr(thing):
390 390 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
391 391 return b'{%s}' % (
392 392 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
393 393 )
394 394 return pycompat.bytestr(repr(thing))
395 395
396 396
397 397 def _debugbundle2(ui, gen, all=None, **opts):
398 398 """lists the contents of a bundle2"""
399 399 if not isinstance(gen, bundle2.unbundle20):
400 400 raise error.Abort(_(b'not a bundle2 file'))
401 401 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
402 402 parttypes = opts.get('part_type', [])
403 403 for part in gen.iterparts():
404 404 if parttypes and part.type not in parttypes:
405 405 continue
406 406 msg = b'%s -- %s (mandatory: %r)\n'
407 407 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
408 408 if part.type == b'changegroup':
409 409 version = part.params.get(b'version', b'01')
410 410 cg = changegroup.getunbundler(version, part, b'UN')
411 411 if not ui.quiet:
412 412 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
413 413 if part.type == b'obsmarkers':
414 414 if not ui.quiet:
415 415 _debugobsmarkers(ui, part, indent=4, **opts)
416 416 if part.type == b'phase-heads':
417 417 if not ui.quiet:
418 418 _debugphaseheads(ui, part, indent=4)
419 419
420 420
421 421 @command(
422 422 b'debugbundle',
423 423 [
424 424 (b'a', b'all', None, _(b'show all details')),
425 425 (b'', b'part-type', [], _(b'show only the named part type')),
426 426 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
427 427 ],
428 428 _(b'FILE'),
429 429 norepo=True,
430 430 )
431 431 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
432 432 """lists the contents of a bundle"""
433 433 with hg.openpath(ui, bundlepath) as f:
434 434 if spec:
435 435 spec = exchange.getbundlespec(ui, f)
436 436 ui.write(b'%s\n' % spec)
437 437 return
438 438
439 439 gen = exchange.readbundle(ui, f, bundlepath)
440 440 if isinstance(gen, bundle2.unbundle20):
441 441 return _debugbundle2(ui, gen, all=all, **opts)
442 442 _debugchangegroup(ui, gen, all=all, **opts)
443 443
444 444
445 445 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
446 446 def debugcapabilities(ui, path, **opts):
447 447 """lists the capabilities of a remote peer"""
448 448 opts = pycompat.byteskwargs(opts)
449 449 peer = hg.peer(ui, opts, path)
450 450 caps = peer.capabilities()
451 451 ui.writenoi18n(b'Main capabilities:\n')
452 452 for c in sorted(caps):
453 453 ui.write(b' %s\n' % c)
454 454 b2caps = bundle2.bundle2caps(peer)
455 455 if b2caps:
456 456 ui.writenoi18n(b'Bundle2 capabilities:\n')
457 457 for key, values in sorted(pycompat.iteritems(b2caps)):
458 458 ui.write(b' %s\n' % key)
459 459 for v in values:
460 460 ui.write(b' %s\n' % v)
461 461
462 462
463 463 @command(b'debugcheckstate', [], b'')
464 464 def debugcheckstate(ui, repo):
465 465 """validate the correctness of the current dirstate"""
466 466 parent1, parent2 = repo.dirstate.parents()
467 467 m1 = repo[parent1].manifest()
468 468 m2 = repo[parent2].manifest()
469 469 errors = 0
470 470 for f in repo.dirstate:
471 471 state = repo.dirstate[f]
472 472 if state in b"nr" and f not in m1:
473 473 ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
474 474 errors += 1
475 475 if state in b"a" and f in m1:
476 476 ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
477 477 errors += 1
478 478 if state in b"m" and f not in m1 and f not in m2:
479 479 ui.warn(
480 480 _(b"%s in state %s, but not in either manifest\n") % (f, state)
481 481 )
482 482 errors += 1
483 483 for f in m1:
484 484 state = repo.dirstate[f]
485 485 if state not in b"nrm":
486 486 ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
487 487 errors += 1
488 488 if errors:
489 489 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
490 490 raise error.Abort(errstr)
491 491
492 492
493 493 @command(
494 494 b'debugcolor',
495 495 [(b'', b'style', None, _(b'show all configured styles'))],
496 496 b'hg debugcolor',
497 497 )
498 498 def debugcolor(ui, repo, **opts):
499 499 """show available color, effects or style"""
500 500 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
501 501 if opts.get('style'):
502 502 return _debugdisplaystyle(ui)
503 503 else:
504 504 return _debugdisplaycolor(ui)
505 505
506 506
507 507 def _debugdisplaycolor(ui):
508 508 ui = ui.copy()
509 509 ui._styles.clear()
510 510 for effect in color._activeeffects(ui).keys():
511 511 ui._styles[effect] = effect
512 512 if ui._terminfoparams:
513 513 for k, v in ui.configitems(b'color'):
514 514 if k.startswith(b'color.'):
515 515 ui._styles[k] = k[6:]
516 516 elif k.startswith(b'terminfo.'):
517 517 ui._styles[k] = k[9:]
518 518 ui.write(_(b'available colors:\n'))
519 519 # sort label with a '_' after the other to group '_background' entry.
520 520 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
521 521 for colorname, label in items:
522 522 ui.write(b'%s\n' % colorname, label=label)
523 523
524 524
525 525 def _debugdisplaystyle(ui):
526 526 ui.write(_(b'available style:\n'))
527 527 if not ui._styles:
528 528 return
529 529 width = max(len(s) for s in ui._styles)
530 530 for label, effects in sorted(ui._styles.items()):
531 531 ui.write(b'%s' % label, label=label)
532 532 if effects:
533 533 # 50
534 534 ui.write(b': ')
535 535 ui.write(b' ' * (max(0, width - len(label))))
536 536 ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
537 537 ui.write(b'\n')
538 538
539 539
540 540 @command(b'debugcreatestreamclonebundle', [], b'FILE')
541 541 def debugcreatestreamclonebundle(ui, repo, fname):
542 542 """create a stream clone bundle file
543 543
544 544 Stream bundles are special bundles that are essentially archives of
545 545 revlog files. They are commonly used for cloning very quickly.
546 546 """
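    # Illustrative usage sketch (not part of this changeset): a stream clone
    # bundle written here can later be applied with debugapplystreamclonebundle
    # defined above. The file name and repository paths are made up for the
    # example:
    #
    #   hg -R repo debugcreatestreamclonebundle stream.hg
    #   hg -R fresh-clone debugapplystreamclonebundle ../repo/stream.hg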
547 547 # TODO we may want to turn this into an abort when this functionality
548 548 # is moved into `hg bundle`.
549 549 if phases.hassecret(repo):
550 550 ui.warn(
551 551 _(
552 552 b'(warning: stream clone bundle will contain secret '
553 553 b'revisions)\n'
554 554 )
555 555 )
556 556
557 557 requirements, gen = streamclone.generatebundlev1(repo)
558 558 changegroup.writechunks(ui, gen, fname)
559 559
560 560 ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
561 561
562 562
563 563 @command(
564 564 b'debugdag',
565 565 [
566 566 (b't', b'tags', None, _(b'use tags as labels')),
567 567 (b'b', b'branches', None, _(b'annotate with branch names')),
568 568 (b'', b'dots', None, _(b'use dots for runs')),
569 569 (b's', b'spaces', None, _(b'separate elements by spaces')),
570 570 ],
571 571 _(b'[OPTION]... [FILE [REV]...]'),
572 572 optionalrepo=True,
573 573 )
574 574 def debugdag(ui, repo, file_=None, *revs, **opts):
575 575 """format the changelog or an index DAG as a concise textual description
576 576
577 577 If you pass a revlog index, the revlog's DAG is emitted. If you list
578 578 revision numbers, they get labeled in the output as rN.
579 579
580 580 Otherwise, the changelog DAG of the current repo is emitted.
581 581 """
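    # Illustrative invocations (not part of this changeset), based on the
    # options declared above; FILE stands for a revlog index path supplied by
    # the caller:
    #
    #   hg debugdag -t -b            # changelog DAG with tag and branch labels
    #   hg debugdag --dots FILE 0 5  # revlog index DAG, labelling r0 and r5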
582 582 spaces = opts.get('spaces')
583 583 dots = opts.get('dots')
584 584 if file_:
585 585 rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
586 586 revs = set((int(r) for r in revs))
587 587
588 588 def events():
589 589 for r in rlog:
590 590 yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
591 591 if r in revs:
592 592 yield b'l', (r, b"r%i" % r)
593 593
594 594 elif repo:
595 595 cl = repo.changelog
596 596 tags = opts.get('tags')
597 597 branches = opts.get('branches')
598 598 if tags:
599 599 labels = {}
600 600 for l, n in repo.tags().items():
601 601 labels.setdefault(cl.rev(n), []).append(l)
602 602
603 603 def events():
604 604 b = b"default"
605 605 for r in cl:
606 606 if branches:
607 607 newb = cl.read(cl.node(r))[5][b'branch']
608 608 if newb != b:
609 609 yield b'a', newb
610 610 b = newb
611 611 yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
612 612 if tags:
613 613 ls = labels.get(r)
614 614 if ls:
615 615 for l in ls:
616 616 yield b'l', (r, l)
617 617
618 618 else:
619 619 raise error.Abort(_(b'need repo for changelog dag'))
620 620
621 621 for line in dagparser.dagtextlines(
622 622 events(),
623 623 addspaces=spaces,
624 624 wraplabels=True,
625 625 wrapannotations=True,
626 626 wrapnonlinear=dots,
627 627 usedots=dots,
628 628 maxlinewidth=70,
629 629 ):
630 630 ui.write(line)
631 631 ui.write(b"\n")
632 632
633 633
634 634 @command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
635 635 def debugdata(ui, repo, file_, rev=None, **opts):
636 636 """dump the contents of a data file revision"""
637 637 opts = pycompat.byteskwargs(opts)
638 638 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
639 639 if rev is not None:
640 640 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
641 641 file_, rev = None, file_
642 642 elif rev is None:
643 643 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
644 644 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
645 645 try:
646 646 ui.write(r.rawdata(r.lookup(rev)))
647 647 except KeyError:
648 648 raise error.Abort(_(b'invalid revision identifier %s') % rev)
649 649
650 650
651 651 @command(
652 652 b'debugdate',
653 653 [(b'e', b'extended', None, _(b'try extended date formats'))],
654 654 _(b'[-e] DATE [RANGE]'),
655 655 norepo=True,
656 656 optionalrepo=True,
657 657 )
658 658 def debugdate(ui, date, range=None, **opts):
659 659 """parse and display a date"""
660 660 if opts["extended"]:
661 661 d = dateutil.parsedate(date, dateutil.extendeddateformats)
662 662 else:
663 663 d = dateutil.parsedate(date)
664 664 ui.writenoi18n(b"internal: %d %d\n" % d)
665 665 ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
666 666 if range:
667 667 m = dateutil.matchdate(range)
668 668 ui.writenoi18n(b"match: %s\n" % m(d[0]))
669 669
670 670
671 671 @command(
672 672 b'debugdeltachain',
673 673 cmdutil.debugrevlogopts + cmdutil.formatteropts,
674 674 _(b'-c|-m|FILE'),
675 675 optionalrepo=True,
676 676 )
677 677 def debugdeltachain(ui, repo, file_=None, **opts):
678 678 """dump information about delta chains in a revlog
679 679
680 680 Output can be templatized. Available template keywords are:
681 681
682 682 :``rev``: revision number
683 683 :``chainid``: delta chain identifier (numbered by unique base)
684 684 :``chainlen``: delta chain length to this revision
685 685 :``prevrev``: previous revision in delta chain
686 686 :``deltatype``: role of delta / how it was computed
687 687 :``compsize``: compressed size of revision
688 688 :``uncompsize``: uncompressed size of revision
689 689 :``chainsize``: total size of compressed revisions in chain
690 690 :``chainratio``: total chain size divided by uncompressed revision size
691 691 (new delta chains typically start at ratio 2.00)
692 692 :``lindist``: linear distance from base revision in delta chain to end
693 693 of this revision
694 694 :``extradist``: total size of revisions not part of this delta chain from
695 695 base of delta chain to end of this revision; a measurement
696 696 of how much extra data we need to read/seek across to read
697 697 the delta chain for this revision
698 698 :``extraratio``: extradist divided by chainsize; another representation of
699 699 how much unrelated data is needed to load this delta chain
700 700
701 701     If the repository is configured to use sparse reads, additional keywords
702 702 are available:
703 703
704 704 :``readsize``: total size of data read from the disk for a revision
705 705 (sum of the sizes of all the blocks)
706 706 :``largestblock``: size of the largest block of data read from the disk
707 707 :``readdensity``: density of useful bytes in the data read from the disk
708 708 :``srchunks``: in how many data hunks the whole revision would be read
709 709
710 710 The sparse read can be enabled with experimental.sparse-read = True
711 711 """
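    # Illustrative example (not part of this changeset): the keywords listed
    # above can be combined with the generic template support, e.g.
    #
    #   hg debugdeltachain -m -T '{rev} {chainid} {chainlen} {chainratio}\n'
    #
    # which prints one line per manifest revision with its chain id, chain
    # length and chain ratio.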
712 712 opts = pycompat.byteskwargs(opts)
713 713 r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
714 714 index = r.index
715 715 start = r.start
716 716 length = r.length
717 717 generaldelta = r.version & revlog.FLAG_GENERALDELTA
718 718 withsparseread = getattr(r, '_withsparseread', False)
719 719
720 720 def revinfo(rev):
721 721 e = index[rev]
722 722 compsize = e[1]
723 723 uncompsize = e[2]
724 724 chainsize = 0
725 725
726 726 if generaldelta:
727 727 if e[3] == e[5]:
728 728 deltatype = b'p1'
729 729 elif e[3] == e[6]:
730 730 deltatype = b'p2'
731 731 elif e[3] == rev - 1:
732 732 deltatype = b'prev'
733 733 elif e[3] == rev:
734 734 deltatype = b'base'
735 735 else:
736 736 deltatype = b'other'
737 737 else:
738 738 if e[3] == rev:
739 739 deltatype = b'base'
740 740 else:
741 741 deltatype = b'prev'
742 742
743 743 chain = r._deltachain(rev)[0]
744 744 for iterrev in chain:
745 745 e = index[iterrev]
746 746 chainsize += e[1]
747 747
748 748 return compsize, uncompsize, deltatype, chain, chainsize
749 749
750 750 fm = ui.formatter(b'debugdeltachain', opts)
751 751
752 752 fm.plain(
753 753 b' rev chain# chainlen prev delta '
754 754 b'size rawsize chainsize ratio lindist extradist '
755 755 b'extraratio'
756 756 )
757 757 if withsparseread:
758 758 fm.plain(b' readsize largestblk rddensity srchunks')
759 759 fm.plain(b'\n')
760 760
761 761 chainbases = {}
762 762 for rev in r:
763 763 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
764 764 chainbase = chain[0]
765 765 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
766 766 basestart = start(chainbase)
767 767 revstart = start(rev)
768 768 lineardist = revstart + comp - basestart
769 769 extradist = lineardist - chainsize
770 770 try:
771 771 prevrev = chain[-2]
772 772 except IndexError:
773 773 prevrev = -1
774 774
775 775 if uncomp != 0:
776 776 chainratio = float(chainsize) / float(uncomp)
777 777 else:
778 778 chainratio = chainsize
779 779
780 780 if chainsize != 0:
781 781 extraratio = float(extradist) / float(chainsize)
782 782 else:
783 783 extraratio = extradist
784 784
785 785 fm.startitem()
786 786 fm.write(
787 787 b'rev chainid chainlen prevrev deltatype compsize '
788 788 b'uncompsize chainsize chainratio lindist extradist '
789 789 b'extraratio',
790 790 b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
791 791 rev,
792 792 chainid,
793 793 len(chain),
794 794 prevrev,
795 795 deltatype,
796 796 comp,
797 797 uncomp,
798 798 chainsize,
799 799 chainratio,
800 800 lineardist,
801 801 extradist,
802 802 extraratio,
803 803 rev=rev,
804 804 chainid=chainid,
805 805 chainlen=len(chain),
806 806 prevrev=prevrev,
807 807 deltatype=deltatype,
808 808 compsize=comp,
809 809 uncompsize=uncomp,
810 810 chainsize=chainsize,
811 811 chainratio=chainratio,
812 812 lindist=lineardist,
813 813 extradist=extradist,
814 814 extraratio=extraratio,
815 815 )
816 816 if withsparseread:
817 817 readsize = 0
818 818 largestblock = 0
819 819 srchunks = 0
820 820
821 821 for revschunk in deltautil.slicechunk(r, chain):
822 822 srchunks += 1
823 823 blkend = start(revschunk[-1]) + length(revschunk[-1])
824 824 blksize = blkend - start(revschunk[0])
825 825
826 826 readsize += blksize
827 827 if largestblock < blksize:
828 828 largestblock = blksize
829 829
830 830 if readsize:
831 831 readdensity = float(chainsize) / float(readsize)
832 832 else:
833 833 readdensity = 1
834 834
835 835 fm.write(
836 836 b'readsize largestblock readdensity srchunks',
837 837 b' %10d %10d %9.5f %8d',
838 838 readsize,
839 839 largestblock,
840 840 readdensity,
841 841 srchunks,
842 842 readsize=readsize,
843 843 largestblock=largestblock,
844 844 readdensity=readdensity,
845 845 srchunks=srchunks,
846 846 )
847 847
848 848 fm.plain(b'\n')
849 849
850 850 fm.end()
851 851
852 852
853 853 @command(
854 854 b'debugdirstate|debugstate',
855 855 [
856 856 (
857 857 b'',
858 858 b'nodates',
859 859 None,
860 860 _(b'do not display the saved mtime (DEPRECATED)'),
861 861 ),
862 862 (b'', b'dates', True, _(b'display the saved mtime')),
863 863 (b'', b'datesort', None, _(b'sort by saved mtime')),
864 864 ],
865 865 _(b'[OPTION]...'),
866 866 )
867 867 def debugstate(ui, repo, **opts):
868 868 """show the contents of the current dirstate"""
869 869
870 870 nodates = not opts['dates']
871 871 if opts.get('nodates') is not None:
872 872 nodates = True
873 873 datesort = opts.get('datesort')
874 874
875 875 if datesort:
876 876 keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
877 877 else:
878 878 keyfunc = None # sort by filename
879 879 for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
880 880 if ent[3] == -1:
881 881 timestr = b'unset '
882 882 elif nodates:
883 883 timestr = b'set '
884 884 else:
885 885 timestr = time.strftime(
886 886 "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
887 887 )
888 888 timestr = encoding.strtolocal(timestr)
889 889 if ent[1] & 0o20000:
890 890 mode = b'lnk'
891 891 else:
892 892 mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
893 893 ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
894 894 for f in repo.dirstate.copies():
895 895 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
896 896
897 897
898 898 @command(
899 899 b'debugdiscovery',
900 900 [
901 901 (b'', b'old', None, _(b'use old-style discovery')),
902 902 (
903 903 b'',
904 904 b'nonheads',
905 905 None,
906 906 _(b'use old-style discovery with non-heads included'),
907 907 ),
908 908 (b'', b'rev', [], b'restrict discovery to this set of revs'),
909 909         (b'', b'seed', b'12323', b'specify the random seed used for discovery'),
910 910 ]
911 911 + cmdutil.remoteopts,
912 912 _(b'[--rev REV] [OTHER]'),
913 913 )
914 914 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
915 915 """runs the changeset discovery protocol in isolation"""
916 916 opts = pycompat.byteskwargs(opts)
917 917 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
918 918 remote = hg.peer(repo, opts, remoteurl)
919 919 ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))
920 920
921 921 # make sure tests are repeatable
922 922 random.seed(int(opts[b'seed']))
923 923
924 924 if opts.get(b'old'):
925 925
926 926 def doit(pushedrevs, remoteheads, remote=remote):
927 927 if not util.safehasattr(remote, b'branches'):
928 928 # enable in-client legacy support
929 929 remote = localrepo.locallegacypeer(remote.local())
930 930 common, _in, hds = treediscovery.findcommonincoming(
931 931 repo, remote, force=True
932 932 )
933 933 common = set(common)
934 934 if not opts.get(b'nonheads'):
935 935 ui.writenoi18n(
936 936 b"unpruned common: %s\n"
937 937 % b" ".join(sorted(short(n) for n in common))
938 938 )
939 939
940 940 clnode = repo.changelog.node
941 941 common = repo.revs(b'heads(::%ln)', common)
942 942 common = {clnode(r) for r in common}
943 943 return common, hds
944 944
945 945 else:
946 946
947 947 def doit(pushedrevs, remoteheads, remote=remote):
948 948 nodes = None
949 949 if pushedrevs:
950 950 revs = scmutil.revrange(repo, pushedrevs)
951 951 nodes = [repo[r].node() for r in revs]
952 952 common, any, hds = setdiscovery.findcommonheads(
953 953 ui, repo, remote, ancestorsof=nodes
954 954 )
955 955 return common, hds
956 956
957 957 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
958 958 localrevs = opts[b'rev']
959 959 with util.timedcm('debug-discovery') as t:
960 960 common, hds = doit(localrevs, remoterevs)
961 961
962 962 # compute all statistics
963 963 common = set(common)
964 964 rheads = set(hds)
965 965 lheads = set(repo.heads())
966 966
967 967 data = {}
968 968 data[b'elapsed'] = t.elapsed
969 969 data[b'nb-common'] = len(common)
970 970 data[b'nb-common-local'] = len(common & lheads)
971 971 data[b'nb-common-remote'] = len(common & rheads)
972 972 data[b'nb-common-both'] = len(common & rheads & lheads)
973 973 data[b'nb-local'] = len(lheads)
974 974 data[b'nb-local-missing'] = data[b'nb-local'] - data[b'nb-common-local']
975 975 data[b'nb-remote'] = len(rheads)
976 976 data[b'nb-remote-unknown'] = data[b'nb-remote'] - data[b'nb-common-remote']
977 977 data[b'nb-revs'] = len(repo.revs(b'all()'))
978 978 data[b'nb-revs-common'] = len(repo.revs(b'::%ln', common))
979 979 data[b'nb-revs-missing'] = data[b'nb-revs'] - data[b'nb-revs-common']
980 980
981 981 # display discovery summary
982 982 ui.writenoi18n(b"elapsed time: %(elapsed)f seconds\n" % data)
983 983 ui.writenoi18n(b"heads summary:\n")
984 984 ui.writenoi18n(b" total common heads: %(nb-common)9d\n" % data)
985 985 ui.writenoi18n(b" also local heads: %(nb-common-local)9d\n" % data)
986 986 ui.writenoi18n(b" also remote heads: %(nb-common-remote)9d\n" % data)
987 987 ui.writenoi18n(b" both: %(nb-common-both)9d\n" % data)
988 988 ui.writenoi18n(b" local heads: %(nb-local)9d\n" % data)
989 989 ui.writenoi18n(b" common: %(nb-common-local)9d\n" % data)
990 990 ui.writenoi18n(b" missing: %(nb-local-missing)9d\n" % data)
991 991 ui.writenoi18n(b" remote heads: %(nb-remote)9d\n" % data)
992 992 ui.writenoi18n(b" common: %(nb-common-remote)9d\n" % data)
993 993 ui.writenoi18n(b" unknown: %(nb-remote-unknown)9d\n" % data)
994 994 ui.writenoi18n(b"local changesets: %(nb-revs)9d\n" % data)
995 995 ui.writenoi18n(b" common: %(nb-revs-common)9d\n" % data)
996 996 ui.writenoi18n(b" missing: %(nb-revs-missing)9d\n" % data)
997 997
998 998 if ui.verbose:
999 999 ui.writenoi18n(
1000 1000 b"common heads: %s\n" % b" ".join(sorted(short(n) for n in common))
1001 1001 )
1002 1002
1003 1003
1004 1004 _chunksize = 4 << 10
1005 1005
1006 1006
1007 1007 @command(
1008 1008 b'debugdownload', [(b'o', b'output', b'', _(b'path')),], optionalrepo=True
1009 1009 )
1010 1010 def debugdownload(ui, repo, url, output=None, **opts):
1011 1011 """download a resource using Mercurial logic and config
1012 1012 """
1013 1013 fh = urlmod.open(ui, url, output)
1014 1014
1015 1015 dest = ui
1016 1016 if output:
1017 1017 dest = open(output, b"wb", _chunksize)
1018 1018 try:
1019 1019 data = fh.read(_chunksize)
1020 1020 while data:
1021 1021 dest.write(data)
1022 1022 data = fh.read(_chunksize)
1023 1023 finally:
1024 1024 if output:
1025 1025 dest.close()
1026 1026
1027 1027
1028 1028 @command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
1029 1029 def debugextensions(ui, repo, **opts):
1030 1030 '''show information about active extensions'''
1031 1031 opts = pycompat.byteskwargs(opts)
1032 1032 exts = extensions.extensions(ui)
1033 1033 hgver = util.version()
1034 1034 fm = ui.formatter(b'debugextensions', opts)
1035 1035 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
1036 1036 isinternal = extensions.ismoduleinternal(extmod)
1037 1037 extsource = None
1038 1038
1039 1039 if util.safehasattr(extmod, '__file__'):
1040 1040 extsource = pycompat.fsencode(extmod.__file__)
1041 1041 elif getattr(sys, 'oxidized', False):
1042 1042 extsource = pycompat.sysexecutable
1043 1043 if isinternal:
1044 1044 exttestedwith = [] # never expose magic string to users
1045 1045 else:
1046 1046 exttestedwith = getattr(extmod, 'testedwith', b'').split()
1047 1047 extbuglink = getattr(extmod, 'buglink', None)
1048 1048
1049 1049 fm.startitem()
1050 1050
1051 1051 if ui.quiet or ui.verbose:
1052 1052 fm.write(b'name', b'%s\n', extname)
1053 1053 else:
1054 1054 fm.write(b'name', b'%s', extname)
1055 1055 if isinternal or hgver in exttestedwith:
1056 1056 fm.plain(b'\n')
1057 1057 elif not exttestedwith:
1058 1058 fm.plain(_(b' (untested!)\n'))
1059 1059 else:
1060 1060 lasttestedversion = exttestedwith[-1]
1061 1061 fm.plain(b' (%s!)\n' % lasttestedversion)
1062 1062
1063 1063 fm.condwrite(
1064 1064 ui.verbose and extsource,
1065 1065 b'source',
1066 1066 _(b' location: %s\n'),
1067 1067 extsource or b"",
1068 1068 )
1069 1069
1070 1070 if ui.verbose:
1071 1071 fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
1072 1072 fm.data(bundled=isinternal)
1073 1073
1074 1074 fm.condwrite(
1075 1075 ui.verbose and exttestedwith,
1076 1076 b'testedwith',
1077 1077 _(b' tested with: %s\n'),
1078 1078 fm.formatlist(exttestedwith, name=b'ver'),
1079 1079 )
1080 1080
1081 1081 fm.condwrite(
1082 1082 ui.verbose and extbuglink,
1083 1083 b'buglink',
1084 1084 _(b' bug reporting: %s\n'),
1085 1085 extbuglink or b"",
1086 1086 )
1087 1087
1088 1088 fm.end()
1089 1089
1090 1090
1091 1091 @command(
1092 1092 b'debugfileset',
1093 1093 [
1094 1094 (
1095 1095 b'r',
1096 1096 b'rev',
1097 1097 b'',
1098 1098 _(b'apply the filespec on this revision'),
1099 1099 _(b'REV'),
1100 1100 ),
1101 1101 (
1102 1102 b'',
1103 1103 b'all-files',
1104 1104 False,
1105 1105 _(b'test files from all revisions and working directory'),
1106 1106 ),
1107 1107 (
1108 1108 b's',
1109 1109 b'show-matcher',
1110 1110 None,
1111 1111 _(b'print internal representation of matcher'),
1112 1112 ),
1113 1113 (
1114 1114 b'p',
1115 1115 b'show-stage',
1116 1116 [],
1117 1117 _(b'print parsed tree at the given stage'),
1118 1118 _(b'NAME'),
1119 1119 ),
1120 1120 ],
1121 1121 _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
1122 1122 )
1123 1123 def debugfileset(ui, repo, expr, **opts):
1124 1124 '''parse and apply a fileset specification'''
1125 1125 from . import fileset
1126 1126
1127 1127 fileset.symbols # force import of fileset so we have predicates to optimize
1128 1128 opts = pycompat.byteskwargs(opts)
1129 1129 ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
1130 1130
1131 1131 stages = [
1132 1132 (b'parsed', pycompat.identity),
1133 1133 (b'analyzed', filesetlang.analyze),
1134 1134 (b'optimized', filesetlang.optimize),
1135 1135 ]
1136 1136 stagenames = set(n for n, f in stages)
1137 1137
1138 1138 showalways = set()
1139 1139 if ui.verbose and not opts[b'show_stage']:
1140 1140 # show parsed tree by --verbose (deprecated)
1141 1141 showalways.add(b'parsed')
1142 1142 if opts[b'show_stage'] == [b'all']:
1143 1143 showalways.update(stagenames)
1144 1144 else:
1145 1145 for n in opts[b'show_stage']:
1146 1146 if n not in stagenames:
1147 1147 raise error.Abort(_(b'invalid stage name: %s') % n)
1148 1148 showalways.update(opts[b'show_stage'])
1149 1149
1150 1150 tree = filesetlang.parse(expr)
1151 1151 for n, f in stages:
1152 1152 tree = f(tree)
1153 1153 if n in showalways:
1154 1154 if opts[b'show_stage'] or n != b'parsed':
1155 1155 ui.write(b"* %s:\n" % n)
1156 1156 ui.write(filesetlang.prettyformat(tree), b"\n")
1157 1157
1158 1158 files = set()
1159 1159 if opts[b'all_files']:
1160 1160 for r in repo:
1161 1161 c = repo[r]
1162 1162 files.update(c.files())
1163 1163 files.update(c.substate)
1164 1164 if opts[b'all_files'] or ctx.rev() is None:
1165 1165 wctx = repo[None]
1166 1166 files.update(
1167 1167 repo.dirstate.walk(
1168 1168 scmutil.matchall(repo),
1169 1169 subrepos=list(wctx.substate),
1170 1170 unknown=True,
1171 1171 ignored=True,
1172 1172 )
1173 1173 )
1174 1174 files.update(wctx.substate)
1175 1175 else:
1176 1176 files.update(ctx.files())
1177 1177 files.update(ctx.substate)
1178 1178
1179 1179 m = ctx.matchfileset(repo.getcwd(), expr)
1180 1180 if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
1181 1181 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
1182 1182 for f in sorted(files):
1183 1183 if not m(f):
1184 1184 continue
1185 1185 ui.write(b"%s\n" % f)
1186 1186
1187 1187
1188 1188 @command(b'debugformat', [] + cmdutil.formatteropts)
1189 1189 def debugformat(ui, repo, **opts):
1190 1190 """display format information about the current repository
1191 1191
1192 1192     Use --verbose to get extra information about the current config value and
1193 1193     the Mercurial default."""
1194 1194 opts = pycompat.byteskwargs(opts)
1195 1195 maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
1196 1196 maxvariantlength = max(len(b'format-variant'), maxvariantlength)
1197 1197
1198 1198 def makeformatname(name):
1199 1199 return b'%s:' + (b' ' * (maxvariantlength - len(name)))
1200 1200
1201 1201 fm = ui.formatter(b'debugformat', opts)
1202 1202 if fm.isplain():
1203 1203
1204 1204 def formatvalue(value):
1205 1205 if util.safehasattr(value, b'startswith'):
1206 1206 return value
1207 1207 if value:
1208 1208 return b'yes'
1209 1209 else:
1210 1210 return b'no'
1211 1211
1212 1212 else:
1213 1213 formatvalue = pycompat.identity
1214 1214
1215 1215 fm.plain(b'format-variant')
1216 1216 fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
1217 1217 fm.plain(b' repo')
1218 1218 if ui.verbose:
1219 1219 fm.plain(b' config default')
1220 1220 fm.plain(b'\n')
1221 1221 for fv in upgrade.allformatvariant:
1222 1222 fm.startitem()
1223 1223 repovalue = fv.fromrepo(repo)
1224 1224 configvalue = fv.fromconfig(repo)
1225 1225
1226 1226 if repovalue != configvalue:
1227 1227 namelabel = b'formatvariant.name.mismatchconfig'
1228 1228 repolabel = b'formatvariant.repo.mismatchconfig'
1229 1229 elif repovalue != fv.default:
1230 1230 namelabel = b'formatvariant.name.mismatchdefault'
1231 1231 repolabel = b'formatvariant.repo.mismatchdefault'
1232 1232 else:
1233 1233 namelabel = b'formatvariant.name.uptodate'
1234 1234 repolabel = b'formatvariant.repo.uptodate'
1235 1235
1236 1236 fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
1237 1237 fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
1238 1238 if fv.default != configvalue:
1239 1239 configlabel = b'formatvariant.config.special'
1240 1240 else:
1241 1241 configlabel = b'formatvariant.config.default'
1242 1242 fm.condwrite(
1243 1243 ui.verbose,
1244 1244 b'config',
1245 1245 b' %6s',
1246 1246 formatvalue(configvalue),
1247 1247 label=configlabel,
1248 1248 )
1249 1249 fm.condwrite(
1250 1250 ui.verbose,
1251 1251 b'default',
1252 1252 b' %7s',
1253 1253 formatvalue(fv.default),
1254 1254 label=b'formatvariant.default',
1255 1255 )
1256 1256 fm.plain(b'\n')
1257 1257 fm.end()
1258 1258
1259 1259
1260 1260 @command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
1261 1261 def debugfsinfo(ui, path=b"."):
1262 1262 """show information detected about current filesystem"""
1263 1263 ui.writenoi18n(b'path: %s\n' % path)
1264 1264 ui.writenoi18n(
1265 1265 b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
1266 1266 )
1267 1267 ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
1268 1268 ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
1269 1269 ui.writenoi18n(
1270 1270 b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
1271 1271 )
1272 1272 ui.writenoi18n(
1273 1273 b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
1274 1274 )
1275 1275 casesensitive = b'(unknown)'
1276 1276 try:
1277 1277 with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
1278 1278 casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
1279 1279 except OSError:
1280 1280 pass
1281 1281 ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1282 1282
1283 1283
1284 1284 @command(
1285 1285 b'debuggetbundle',
1286 1286 [
1287 1287 (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
1288 1288 (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
1289 1289 (
1290 1290 b't',
1291 1291 b'type',
1292 1292 b'bzip2',
1293 1293 _(b'bundle compression type to use'),
1294 1294 _(b'TYPE'),
1295 1295 ),
1296 1296 ],
1297 1297 _(b'REPO FILE [-H|-C ID]...'),
1298 1298 norepo=True,
1299 1299 )
1300 1300 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
1301 1301 """retrieves a bundle from a repo
1302 1302
1303 1303 Every ID must be a full-length hex node id string. Saves the bundle to the
1304 1304 given file.
1305 1305 """
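    # Illustrative invocation (not part of this changeset); the URL, output
    # file and head id are placeholders:
    #
    #   hg debuggetbundle https://example.com/repo out.hg \
    #       -H <40-hex-head-id> -t bundle2
    #
    # -H/-C take full-length hex node ids, and -t selects one of the bundle
    # types mapped in `btypes` below.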
1306 1306 opts = pycompat.byteskwargs(opts)
1307 1307 repo = hg.peer(ui, opts, repopath)
1308 1308 if not repo.capable(b'getbundle'):
1309 1309 raise error.Abort(b"getbundle() not supported by target repository")
1310 1310 args = {}
1311 1311 if common:
1312 1312 args['common'] = [bin(s) for s in common]
1313 1313 if head:
1314 1314 args['heads'] = [bin(s) for s in head]
1315 1315 # TODO: get desired bundlecaps from command line.
1316 1316 args['bundlecaps'] = None
1317 1317 bundle = repo.getbundle(b'debug', **args)
1318 1318
1319 1319 bundletype = opts.get(b'type', b'bzip2').lower()
1320 1320 btypes = {
1321 1321 b'none': b'HG10UN',
1322 1322 b'bzip2': b'HG10BZ',
1323 1323 b'gzip': b'HG10GZ',
1324 1324 b'bundle2': b'HG20',
1325 1325 }
1326 1326 bundletype = btypes.get(bundletype)
1327 1327 if bundletype not in bundle2.bundletypes:
1328 1328 raise error.Abort(_(b'unknown bundle type specified with --type'))
1329 1329 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1330 1330
1331 1331
1332 1332 @command(b'debugignore', [], b'[FILE]')
1333 1333 def debugignore(ui, repo, *files, **opts):
1334 1334 """display the combined ignore pattern and information about ignored files
1335 1335
1336 1336 With no argument display the combined ignore pattern.
1337 1337
1338 1338     Given space-separated file names, shows if the given file is ignored and,
1339 1339     if so, shows the ignore rule (file and line number) that matched it.
1340 1340 """
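    # Illustrative session (not part of this changeset); the file names and
    # the matching rule are made up for the example, but the message formats
    # correspond to the ones emitted below:
    #
    #   $ hg debugignore build/output.o src/main.c
    #   build/output.o is ignored
    #   (ignore rule in .hgignore, line 3: 'build/')
    #   src/main.c is not ignored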
1341 1341 ignore = repo.dirstate._ignore
1342 1342 if not files:
1343 1343 # Show all the patterns
1344 1344 ui.write(b"%s\n" % pycompat.byterepr(ignore))
1345 1345 else:
1346 1346 m = scmutil.match(repo[None], pats=files)
1347 1347 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1348 1348 for f in m.files():
1349 1349 nf = util.normpath(f)
1350 1350 ignored = None
1351 1351 ignoredata = None
1352 1352 if nf != b'.':
1353 1353 if ignore(nf):
1354 1354 ignored = nf
1355 1355 ignoredata = repo.dirstate._ignorefileandline(nf)
1356 1356 else:
1357 1357 for p in pathutil.finddirs(nf):
1358 1358 if ignore(p):
1359 1359 ignored = p
1360 1360 ignoredata = repo.dirstate._ignorefileandline(p)
1361 1361 break
1362 1362 if ignored:
1363 1363 if ignored == nf:
1364 1364 ui.write(_(b"%s is ignored\n") % uipathfn(f))
1365 1365 else:
1366 1366 ui.write(
1367 1367 _(
1368 1368 b"%s is ignored because of "
1369 1369 b"containing directory %s\n"
1370 1370 )
1371 1371 % (uipathfn(f), ignored)
1372 1372 )
1373 1373 ignorefile, lineno, line = ignoredata
1374 1374 ui.write(
1375 1375 _(b"(ignore rule in %s, line %d: '%s')\n")
1376 1376 % (ignorefile, lineno, line)
1377 1377 )
1378 1378 else:
1379 1379 ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1380 1380
1381 1381
1382 1382 @command(
1383 1383 b'debugindex',
1384 1384 cmdutil.debugrevlogopts + cmdutil.formatteropts,
1385 1385 _(b'-c|-m|FILE'),
1386 1386 )
1387 1387 def debugindex(ui, repo, file_=None, **opts):
1388 1388 """dump index data for a storage primitive"""
1389 1389 opts = pycompat.byteskwargs(opts)
1390 1390 store = cmdutil.openstorage(repo, b'debugindex', file_, opts)
1391 1391
1392 1392 if ui.debugflag:
1393 1393 shortfn = hex
1394 1394 else:
1395 1395 shortfn = short
1396 1396
1397 1397 idlen = 12
1398 1398 for i in store:
1399 1399 idlen = len(shortfn(store.node(i)))
1400 1400 break
1401 1401
1402 1402 fm = ui.formatter(b'debugindex', opts)
1403 1403 fm.plain(
1404 1404 b' rev linkrev %s %s p2\n'
1405 1405 % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
1406 1406 )
1407 1407
1408 1408 for rev in store:
1409 1409 node = store.node(rev)
1410 1410 parents = store.parents(node)
1411 1411
1412 1412 fm.startitem()
1413 1413 fm.write(b'rev', b'%6d ', rev)
1414 1414 fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
1415 1415 fm.write(b'node', b'%s ', shortfn(node))
1416 1416 fm.write(b'p1', b'%s ', shortfn(parents[0]))
1417 1417 fm.write(b'p2', b'%s', shortfn(parents[1]))
1418 1418 fm.plain(b'\n')
1419 1419
1420 1420 fm.end()
1421 1421
1422 1422
1423 1423 @command(
1424 1424 b'debugindexdot',
1425 1425 cmdutil.debugrevlogopts,
1426 1426 _(b'-c|-m|FILE'),
1427 1427 optionalrepo=True,
1428 1428 )
1429 1429 def debugindexdot(ui, repo, file_=None, **opts):
1430 1430 """dump an index DAG as a graphviz dot file"""
1431 1431 opts = pycompat.byteskwargs(opts)
1432 1432 r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
1433 1433 ui.writenoi18n(b"digraph G {\n")
1434 1434 for i in r:
1435 1435 node = r.node(i)
1436 1436 pp = r.parents(node)
1437 1437 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
1438 1438 if pp[1] != nullid:
1439 1439 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
1440 1440 ui.write(b"}\n")
1441 1441
1442 1442
1443 1443 @command(b'debugindexstats', [])
1444 1444 def debugindexstats(ui, repo):
1445 1445 """show stats related to the changelog index"""
1446 1446 repo.changelog.shortest(nullid, 1)
1447 1447 index = repo.changelog.index
1448 1448 if not util.safehasattr(index, b'stats'):
1449 1449 raise error.Abort(_(b'debugindexstats only works with native code'))
1450 1450 for k, v in sorted(index.stats().items()):
1451 1451 ui.write(b'%s: %d\n' % (k, v))
1452 1452
1453 1453
1454 1454 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1455 1455 def debuginstall(ui, **opts):
1456 1456 '''test Mercurial installation
1457 1457
1458 1458 Returns 0 on success.
1459 1459 '''
1460 1460 opts = pycompat.byteskwargs(opts)
1461 1461
1462 1462 problems = 0
1463 1463
1464 1464 fm = ui.formatter(b'debuginstall', opts)
1465 1465 fm.startitem()
1466 1466
1467 1467 # encoding
1468 1468 fm.write(b'encoding', _(b"checking encoding (%s)...\n"), encoding.encoding)
1469 1469 err = None
1470 1470 try:
1471 1471 codecs.lookup(pycompat.sysstr(encoding.encoding))
1472 1472 except LookupError as inst:
1473 1473 err = stringutil.forcebytestr(inst)
1474 1474 problems += 1
1475 1475 fm.condwrite(
1476 1476 err,
1477 1477 b'encodingerror',
1478 1478 _(b" %s\n (check that your locale is properly set)\n"),
1479 1479 err,
1480 1480 )
1481 1481
1482 1482 # Python
1483 1483 pythonlib = None
1484 1484 if util.safehasattr(os, '__file__'):
1485 1485 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1486 1486 elif getattr(sys, 'oxidized', False):
1487 1487 pythonlib = pycompat.sysexecutable
1488 1488
1489 1489 fm.write(
1490 1490 b'pythonexe',
1491 1491 _(b"checking Python executable (%s)\n"),
1492 1492 pycompat.sysexecutable or _(b"unknown"),
1493 1493 )
1494 1494 fm.write(
1495 1495 b'pythonimplementation',
1496 1496 _(b"checking Python implementation (%s)\n"),
1497 1497 pycompat.sysbytes(platform.python_implementation()),
1498 1498 )
1499 1499 fm.write(
1500 1500 b'pythonver',
1501 1501 _(b"checking Python version (%s)\n"),
1502 1502 (b"%d.%d.%d" % sys.version_info[:3]),
1503 1503 )
1504 1504 fm.write(
1505 1505 b'pythonlib',
1506 1506 _(b"checking Python lib (%s)...\n"),
1507 1507 pythonlib or _(b"unknown"),
1508 1508 )
1509 1509
1510 1510 security = set(sslutil.supportedprotocols)
1511 1511 if sslutil.hassni:
1512 1512 security.add(b'sni')
1513 1513
1514 1514 fm.write(
1515 1515 b'pythonsecurity',
1516 1516 _(b"checking Python security support (%s)\n"),
1517 1517 fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
1518 1518 )
1519 1519
1520 1520 # These are warnings, not errors. So don't increment problem count. This
1521 1521 # may change in the future.
1522 1522 if b'tls1.2' not in security:
1523 1523 fm.plain(
1524 1524 _(
1525 1525 b' TLS 1.2 not supported by Python install; '
1526 1526 b'network connections lack modern security\n'
1527 1527 )
1528 1528 )
1529 1529 if b'sni' not in security:
1530 1530 fm.plain(
1531 1531 _(
1532 1532 b' SNI not supported by Python install; may have '
1533 1533 b'connectivity issues with some servers\n'
1534 1534 )
1535 1535 )
1536 1536
1537 1537 # TODO print CA cert info
1538 1538
1539 1539 # hg version
1540 1540 hgver = util.version()
1541 1541 fm.write(
1542 1542 b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
1543 1543 )
1544 1544 fm.write(
1545 1545 b'hgverextra',
1546 1546 _(b"checking Mercurial custom build (%s)\n"),
1547 1547 b'+'.join(hgver.split(b'+')[1:]),
1548 1548 )
1549 1549
1550 1550 # compiled modules
1551 1551 hgmodules = None
1552 1552 if util.safehasattr(sys.modules[__name__], '__file__'):
1553 1553 hgmodules = os.path.dirname(pycompat.fsencode(__file__))
1554 1554 elif getattr(sys, 'oxidized', False):
1555 1555 hgmodules = pycompat.sysexecutable
1556 1556
1557 1557 fm.write(
1558 1558 b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
1559 1559 )
1560 1560 fm.write(
1561 1561 b'hgmodules',
1562 1562 _(b"checking installed modules (%s)...\n"),
1563 1563 hgmodules or _(b"unknown"),
1564 1564 )
1565 1565
1566 1566 rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
1567 1567 rustext = rustandc # for now, that's the only case
1568 1568 cext = policy.policy in (b'c', b'allow') or rustandc
1569 1569 nopure = cext or rustext
1570 1570 if nopure:
1571 1571 err = None
1572 1572 try:
1573 1573 if cext:
1574 1574 from .cext import ( # pytype: disable=import-error
1575 1575 base85,
1576 1576 bdiff,
1577 1577 mpatch,
1578 1578 osutil,
1579 1579 )
1580 1580
1581 1581 # quiet pyflakes
1582 1582 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1583 1583 if rustext:
1584 1584 from .rustext import ( # pytype: disable=import-error
1585 1585 ancestor,
1586 1586 dirstate,
1587 1587 )
1588 1588
1589 1589 dir(ancestor), dir(dirstate) # quiet pyflakes
1590 1590 except Exception as inst:
1591 1591 err = stringutil.forcebytestr(inst)
1592 1592 problems += 1
1593 1593 fm.condwrite(err, b'extensionserror', b" %s\n", err)
1594 1594
1595 1595 compengines = util.compengines._engines.values()
1596 1596 fm.write(
1597 1597 b'compengines',
1598 1598 _(b'checking registered compression engines (%s)\n'),
1599 1599 fm.formatlist(
1600 1600 sorted(e.name() for e in compengines),
1601 1601 name=b'compengine',
1602 1602 fmt=b'%s',
1603 1603 sep=b', ',
1604 1604 ),
1605 1605 )
1606 1606 fm.write(
1607 1607 b'compenginesavail',
1608 1608 _(b'checking available compression engines (%s)\n'),
1609 1609 fm.formatlist(
1610 1610 sorted(e.name() for e in compengines if e.available()),
1611 1611 name=b'compengine',
1612 1612 fmt=b'%s',
1613 1613 sep=b', ',
1614 1614 ),
1615 1615 )
1616 1616 wirecompengines = compression.compengines.supportedwireengines(
1617 1617 compression.SERVERROLE
1618 1618 )
1619 1619 fm.write(
1620 1620 b'compenginesserver',
1621 1621 _(
1622 1622 b'checking available compression engines '
1623 1623 b'for wire protocol (%s)\n'
1624 1624 ),
1625 1625 fm.formatlist(
1626 1626 [e.name() for e in wirecompengines if e.wireprotosupport()],
1627 1627 name=b'compengine',
1628 1628 fmt=b'%s',
1629 1629 sep=b', ',
1630 1630 ),
1631 1631 )
1632 1632 re2 = b'missing'
1633 1633 if util._re2:
1634 1634 re2 = b'available'
1635 1635 fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
1636 1636 fm.data(re2=bool(util._re2))
1637 1637
1638 1638 # templates
1639 1639 p = templater.templatepaths()
1640 1640 fm.write(b'templatedirs', b'checking templates (%s)...\n', b' '.join(p))
1641 1641 fm.condwrite(not p, b'', _(b" no template directories found\n"))
1642 1642 if p:
1643 1643 m = templater.templatepath(b"map-cmdline.default")
1644 1644 if m:
1645 1645 # template found, check if it is working
1646 1646 err = None
1647 1647 try:
1648 1648 templater.templater.frommapfile(m)
1649 1649 except Exception as inst:
1650 1650 err = stringutil.forcebytestr(inst)
1651 1651 p = None
1652 1652 fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
1653 1653 else:
1654 1654 p = None
1655 1655 fm.condwrite(
1656 1656 p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
1657 1657 )
1658 1658 fm.condwrite(
1659 1659 not m,
1660 1660 b'defaulttemplatenotfound',
1661 1661 _(b" template '%s' not found\n"),
1662 1662 b"default",
1663 1663 )
1664 1664 if not p:
1665 1665 problems += 1
1666 1666 fm.condwrite(
1667 1667 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
1668 1668 )
1669 1669
1670 1670 # editor
1671 1671 editor = ui.geteditor()
1672 1672 editor = util.expandpath(editor)
1673 1673 editorbin = procutil.shellsplit(editor)[0]
1674 1674 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
1675 1675 cmdpath = procutil.findexe(editorbin)
1676 1676 fm.condwrite(
1677 1677 not cmdpath and editor == b'vi',
1678 1678 b'vinotfound',
1679 1679 _(
1680 1680 b" No commit editor set and can't find %s in PATH\n"
1681 1681 b" (specify a commit editor in your configuration"
1682 1682 b" file)\n"
1683 1683 ),
1684 1684 not cmdpath and editor == b'vi' and editorbin,
1685 1685 )
1686 1686 fm.condwrite(
1687 1687 not cmdpath and editor != b'vi',
1688 1688 b'editornotfound',
1689 1689 _(
1690 1690 b" Can't find editor '%s' in PATH\n"
1691 1691 b" (specify a commit editor in your configuration"
1692 1692 b" file)\n"
1693 1693 ),
1694 1694 not cmdpath and editorbin,
1695 1695 )
1696 1696 if not cmdpath and editor != b'vi':
1697 1697 problems += 1
1698 1698
1699 1699 # check username
1700 1700 username = None
1701 1701 err = None
1702 1702 try:
1703 1703 username = ui.username()
1704 1704 except error.Abort as e:
1705 1705 err = stringutil.forcebytestr(e)
1706 1706 problems += 1
1707 1707
1708 1708 fm.condwrite(
1709 1709 username, b'username', _(b"checking username (%s)\n"), username
1710 1710 )
1711 1711 fm.condwrite(
1712 1712 err,
1713 1713 b'usernameerror',
1714 1714 _(
1715 1715 b"checking username...\n %s\n"
1716 1716 b" (specify a username in your configuration file)\n"
1717 1717 ),
1718 1718 err,
1719 1719 )
1720 1720
1721 1721 for name, mod in extensions.extensions():
1722 1722 handler = getattr(mod, 'debuginstall', None)
1723 1723 if handler is not None:
1724 1724 problems += handler(ui, fm)
1725 1725
1726 1726 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
1727 1727 if not problems:
1728 1728 fm.data(problems=problems)
1729 1729 fm.condwrite(
1730 1730 problems,
1731 1731 b'problems',
1732 1732 _(b"%d problems detected, please check your install!\n"),
1733 1733 problems,
1734 1734 )
1735 1735 fm.end()
1736 1736
1737 1737 return problems
1738 1738
1739 1739
1740 1740 @command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
1741 1741 def debugknown(ui, repopath, *ids, **opts):
1742 1742 """test whether node ids are known to a repo
1743 1743
1744 1744 Every ID must be a full-length hex node id string. Returns a list of 0s
1745 1745 and 1s indicating unknown/known.
1746 1746 """
1747 1747 opts = pycompat.byteskwargs(opts)
1748 1748 repo = hg.peer(ui, opts, repopath)
1749 1749 if not repo.capable(b'known'):
1750 1750 raise error.Abort(b"known() not supported by target repository")
1751 1751 flags = repo.known([bin(s) for s in ids])
1752 1752 ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))
1753 1753
1754 1754
1755 1755 @command(b'debuglabelcomplete', [], _(b'LABEL...'))
1756 1756 def debuglabelcomplete(ui, repo, *args):
1757 1757 '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
1758 1758 debugnamecomplete(ui, repo, *args)
1759 1759
1760 1760
1761 1761 @command(
1762 1762 b'debuglocks',
1763 1763 [
1764 1764 (b'L', b'force-lock', None, _(b'free the store lock (DANGEROUS)')),
1765 1765 (
1766 1766 b'W',
1767 1767 b'force-wlock',
1768 1768 None,
1769 1769 _(b'free the working state lock (DANGEROUS)'),
1770 1770 ),
1771 1771 (b's', b'set-lock', None, _(b'set the store lock until stopped')),
1772 1772 (
1773 1773 b'S',
1774 1774 b'set-wlock',
1775 1775 None,
1776 1776 _(b'set the working state lock until stopped'),
1777 1777 ),
1778 1778 ],
1779 1779 _(b'[OPTION]...'),
1780 1780 )
1781 1781 def debuglocks(ui, repo, **opts):
1782 1782 """show or modify state of locks
1783 1783
1784 1784 By default, this command will show which locks are held. This
1785 1785 includes the user and process holding the lock, the amount of time
1786 1786 the lock has been held, and the machine name where the process is
1787 1787 running if it's not local.
1788 1788
1789 1789 Locks protect the integrity of Mercurial's data, so they should be
1790 1790 treated with care. System crashes or other interruptions may cause
1791 1791 locks to not be properly released, though Mercurial will usually
1792 1792 detect and remove such stale locks automatically.
1793 1793
1794 1794 However, detecting stale locks may not always be possible (for
1795 1795 instance, on a shared filesystem). Removing locks may also be
1796 1796 blocked by filesystem permissions.
1797 1797
1798 1798 Setting a lock will prevent other commands from changing the data.
1799 1799 The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
1800 1800 The set locks are removed when the command exits.
1801 1801
1802 1802 Returns 0 if no locks are held.
1803 1803
1804 1804 """
1805 1805
1806 1806 if opts.get('force_lock'):
1807 1807 repo.svfs.unlink(b'lock')
1808 1808 if opts.get('force_wlock'):
1809 1809 repo.vfs.unlink(b'wlock')
1810 1810 if opts.get('force_lock') or opts.get('force_wlock'):
1811 1811 return 0
1812 1812
1813 1813 locks = []
1814 1814 try:
1815 1815 if opts.get('set_wlock'):
1816 1816 try:
1817 1817 locks.append(repo.wlock(False))
1818 1818 except error.LockHeld:
1819 1819 raise error.Abort(_(b'wlock is already held'))
1820 1820 if opts.get('set_lock'):
1821 1821 try:
1822 1822 locks.append(repo.lock(False))
1823 1823 except error.LockHeld:
1824 1824 raise error.Abort(_(b'lock is already held'))
1825 1825 if len(locks):
1826 1826 ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
1827 1827 return 0
1828 1828 finally:
1829 1829 release(*locks)
1830 1830
1831 1831 now = time.time()
1832 1832 held = 0
1833 1833
1834 1834 def report(vfs, name, method):
1835 1835 # this causes stale locks to get reaped for more accurate reporting
1836 1836 try:
1837 1837 l = method(False)
1838 1838 except error.LockHeld:
1839 1839 l = None
1840 1840
1841 1841 if l:
1842 1842 l.release()
1843 1843 else:
1844 1844 try:
1845 1845 st = vfs.lstat(name)
1846 1846 age = now - st[stat.ST_MTIME]
1847 1847 user = util.username(st.st_uid)
1848 1848 locker = vfs.readlock(name)
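# lock contents have the form 'host:pid'; show only the user and pid for
# locks taken on this host, and include the host name otherwise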
1849 1849 if b":" in locker:
1850 1850 host, pid = locker.split(b':')
1851 1851 if host == socket.gethostname():
1852 1852 locker = b'user %s, process %s' % (user or b'None', pid)
1853 1853 else:
1854 1854 locker = b'user %s, process %s, host %s' % (
1855 1855 user or b'None',
1856 1856 pid,
1857 1857 host,
1858 1858 )
1859 1859 ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
1860 1860 return 1
1861 1861 except OSError as e:
1862 1862 if e.errno != errno.ENOENT:
1863 1863 raise
1864 1864
1865 1865 ui.writenoi18n(b"%-6s free\n" % (name + b":"))
1866 1866 return 0
1867 1867
1868 1868 held += report(repo.svfs, b"lock", repo.lock)
1869 1869 held += report(repo.vfs, b"wlock", repo.wlock)
1870 1870
1871 1871 return held
1872 1872
1873 1873
1874 1874 @command(
1875 1875 b'debugmanifestfulltextcache',
1876 1876 [
1877 1877 (b'', b'clear', False, _(b'clear the cache')),
1878 1878 (
1879 1879 b'a',
1880 1880 b'add',
1881 1881 [],
1882 1882 _(b'add the given manifest nodes to the cache'),
1883 1883 _(b'NODE'),
1884 1884 ),
1885 1885 ],
1886 1886 b'',
1887 1887 )
1888 1888 def debugmanifestfulltextcache(ui, repo, add=(), **opts):
1889 1889 """show, clear or amend the contents of the manifest fulltext cache"""
1890 1890
1891 1891 def getcache():
1892 1892 r = repo.manifestlog.getstorage(b'')
1893 1893 try:
1894 1894 return r._fulltextcache
1895 1895 except AttributeError:
1896 1896 msg = _(
1897 1897 b"Current revlog implementation doesn't appear to have a "
1898 1898 b"manifest fulltext cache\n"
1899 1899 )
1900 1900 raise error.Abort(msg)
1901 1901
1902 1902 if opts.get('clear'):
1903 1903 with repo.wlock():
1904 1904 cache = getcache()
1905 1905 cache.clear(clear_persisted_data=True)
1906 1906 return
1907 1907
1908 1908 if add:
1909 1909 with repo.wlock():
1910 1910 m = repo.manifestlog
1911 1911 store = m.getstorage(b'')
1912 1912 for n in add:
1913 1913 try:
1914 1914 manifest = m[store.lookup(n)]
1915 1915 except error.LookupError as e:
1916 1916 raise error.Abort(e, hint=b"Check your manifest node id")
1917 1917 manifest.read() # stores revision in cache too
1918 1918 return
1919 1919
1920 1920 cache = getcache()
1921 1921 if not len(cache):
1922 1922 ui.write(_(b'cache empty\n'))
1923 1923 else:
1924 1924 ui.write(
1925 1925 _(
1926 1926 b'cache contains %d manifest entries, in order of most to '
1927 1927 b'least recent:\n'
1928 1928 )
1929 1929 % (len(cache),)
1930 1930 )
1931 1931 totalsize = 0
1932 1932 for nodeid in cache:
1933 1933 # Use cache.peek to not update the LRU order
1934 1934 data = cache.peek(nodeid)
1935 1935 size = len(data)
1936 1936 totalsize += size + 24 # 20 bytes nodeid, 4 bytes size
1937 1937 ui.write(
1938 1938 _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
1939 1939 )
1940 1940 ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
1941 1941 ui.write(
1942 1942 _(b'total cache data size %s, on-disk %s\n')
1943 1943 % (util.bytecount(totalsize), util.bytecount(ondisk))
1944 1944 )
1945 1945
1946 1946
1947 1947 @command(b'debugmergestate', [], b'')
1948 1948 def debugmergestate(ui, repo, *args):
1949 1949 """print merge state
1950 1950
1951 1951 Use --verbose to print out information about whether v1 or v2 merge state
1952 1952 was chosen."""
1953 1953
1954 1954 def _hashornull(h):
1955 1955 if h == nullhex:
1956 1956 return b'null'
1957 1957 else:
1958 1958 return h
1959 1959
1960 1960 def printrecords(version):
1961 1961 ui.writenoi18n(b'* version %d records\n' % version)
1962 1962 if version == 1:
1963 1963 records = v1records
1964 1964 else:
1965 1965 records = v2records
1966 1966
1967 1967 for rtype, record in records:
1968 1968 # pretty print some record types
1969 1969 if rtype == b'L':
1970 1970 ui.writenoi18n(b'local: %s\n' % record)
1971 1971 elif rtype == b'O':
1972 1972 ui.writenoi18n(b'other: %s\n' % record)
1973 1973 elif rtype == b'm':
1974 1974 driver, mdstate = record.split(b'\0', 1)
1975 1975 ui.writenoi18n(
1976 1976 b'merge driver: %s (state "%s")\n' % (driver, mdstate)
1977 1977 )
1978 1978 elif rtype in b'FDC':
1979 1979 r = record.split(b'\0')
1980 1980 f, state, hash, lfile, afile, anode, ofile = r[0:7]
1981 1981 if version == 1:
1982 1982 onode = b'not stored in v1 format'
1983 1983 flags = r[7]
1984 1984 else:
1985 1985 onode, flags = r[7:9]
1986 1986 ui.writenoi18n(
1987 1987 b'file: %s (record type "%s", state "%s", hash %s)\n'
1988 1988 % (f, rtype, state, _hashornull(hash))
1989 1989 )
1990 1990 ui.writenoi18n(
1991 1991 b' local path: %s (flags "%s")\n' % (lfile, flags)
1992 1992 )
1993 1993 ui.writenoi18n(
1994 1994 b' ancestor path: %s (node %s)\n'
1995 1995 % (afile, _hashornull(anode))
1996 1996 )
1997 1997 ui.writenoi18n(
1998 1998 b' other path: %s (node %s)\n'
1999 1999 % (ofile, _hashornull(onode))
2000 2000 )
2001 2001 elif rtype == b'f':
2002 2002 filename, rawextras = record.split(b'\0', 1)
2003 2003 extras = rawextras.split(b'\0')
2004 2004 i = 0
2005 2005 extrastrings = []
2006 2006 while i < len(extras):
2007 2007 extrastrings.append(b'%s = %s' % (extras[i], extras[i + 1]))
2008 2008 i += 2
2009 2009
2010 2010 ui.writenoi18n(
2011 2011 b'file extras: %s (%s)\n'
2012 2012 % (filename, b', '.join(extrastrings))
2013 2013 )
2014 2014 elif rtype == b'l':
2015 2015 labels = record.split(b'\0', 2)
2016 2016 labels = [l for l in labels if len(l) > 0]
2017 2017 ui.writenoi18n(b'labels:\n')
2018 2018 ui.write((b' local: %s\n' % labels[0]))
2019 2019 ui.write((b' other: %s\n' % labels[1]))
2020 2020 if len(labels) > 2:
2021 2021 ui.write((b' base: %s\n' % labels[2]))
2022 2022 else:
2023 2023 ui.writenoi18n(
2024 2024 b'unrecognized entry: %s\t%s\n'
2025 2025 % (rtype, record.replace(b'\0', b'\t'))
2026 2026 )
2027 2027
2028 2028 # Avoid mergestate.read() since it may raise an exception for unsupported
2029 2029 # merge state records. We shouldn't be doing this, but this is OK since this
2030 2030 # command is pretty low-level.
2031 2031 ms = mergemod.mergestate(repo)
2032 2032
2033 2033 # sort so that reasonable information is on top
2034 2034 v1records = ms._readrecordsv1()
2035 2035 v2records = ms._readrecordsv2()
2036 2036 order = b'LOml'
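# 'L' (local node), 'O' (other node), 'm' (merge driver) and 'l' (labels)
# records carry the summary information, so key() sorts them first in this
# order; any other record type sorts after them by record content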
2037 2037
2038 2038 def key(r):
2039 2039 idx = order.find(r[0])
2040 2040 if idx == -1:
2041 2041 return (1, r[1])
2042 2042 else:
2043 2043 return (0, idx)
2044 2044
2045 2045 v1records.sort(key=key)
2046 2046 v2records.sort(key=key)
2047 2047
2048 2048 if not v1records and not v2records:
2049 2049 ui.writenoi18n(b'no merge state found\n')
2050 2050 elif not v2records:
2051 2051 ui.notenoi18n(b'no version 2 merge state\n')
2052 2052 printrecords(1)
2053 2053 elif ms._v1v2match(v1records, v2records):
2054 2054 ui.notenoi18n(b'v1 and v2 states match: using v2\n')
2055 2055 printrecords(2)
2056 2056 else:
2057 2057 ui.notenoi18n(b'v1 and v2 states mismatch: using v1\n')
2058 2058 printrecords(1)
2059 2059 if ui.verbose:
2060 2060 printrecords(2)
2061 2061
2062 2062
2063 2063 @command(b'debugnamecomplete', [], _(b'NAME...'))
2064 2064 def debugnamecomplete(ui, repo, *args):
2065 2065 '''complete "names" - tags, open branch names, bookmark names'''
2066 2066
2067 2067 names = set()
2068 2068 # since we previously only listed open branches, we will handle that
2069 2069 # specially (after this for loop)
2070 2070 for name, ns in pycompat.iteritems(repo.names):
2071 2071 if name != b'branches':
2072 2072 names.update(ns.listnames(repo))
2073 2073 names.update(
2074 2074 tag
2075 2075 for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
2076 2076 if not closed
2077 2077 )
2078 2078 completions = set()
2079 2079 if not args:
2080 2080 args = [b'']
2081 2081 for a in args:
2082 2082 completions.update(n for n in names if n.startswith(a))
2083 2083 ui.write(b'\n'.join(sorted(completions)))
2084 2084 ui.write(b'\n')
2085 2085
2086 2086
2087 2087 @command(
2088 2088 b'debugnodemap',
2089 2089 [
2090 2090 (
2091 2091 b'',
2092 2092 b'dump-new',
2093 2093 False,
2094 2094 _(b'write a (new) persistent binary nodemap to stdout'),
2095 2095 ),
2096 2096 (b'', b'dump-disk', False, _(b'dump on-disk data to stdout')),
2097 2097 (
2098 2098 b'',
2099 2099 b'check',
2100 2100 False,
2101 2101 _(b'check that the data on disk are correct.'),
2102 2102 ),
2103 2103 (
2104 2104 b'',
2105 2105 b'metadata',
2106 2106 False,
2107 2107 _(b'display the on-disk metadata for the nodemap'),
2108 2108 ),
2109 2109 ],
2110 2110 )
2111 2111 def debugnodemap(ui, repo, **opts):
2112 2112 """write and inspect on disk nodemap
2113 2113 """
2114 2114 if opts['dump_new']:
2115 2115 unfi = repo.unfiltered()
2116 2116 cl = unfi.changelog
2117 2117 data = nodemap.persistent_data(cl.index)
2118 2118 ui.write(data)
2119 2119 elif opts['dump_disk']:
2120 2120 unfi = repo.unfiltered()
2121 2121 cl = unfi.changelog
2122 2122 nm_data = nodemap.persisted_data(cl)
2123 2123 if nm_data is not None:
2124 2124 docket, data = nm_data
2125 2125 ui.write(data)
2126 2126 elif opts['check']:
2127 2127 unfi = repo.unfiltered()
2128 2128 cl = unfi.changelog
2129 2129 nm_data = nodemap.persisted_data(cl)
2130 2130 if nm_data is not None:
2131 2131 docket, data = nm_data
2132 2132 return nodemap.check_data(ui, cl.index, data)
2133 2133 elif opts['metadata']:
2134 2134 unfi = repo.unfiltered()
2135 2135 cl = unfi.changelog
2136 2136 nm_data = nodemap.persisted_data(cl)
2137 2137 if nm_data is not None:
2138 2138 docket, data = nm_data
2139 2139 ui.write((b"uid: %s\n") % docket.uid)
2140 ui.write((b"tip-rev: %d\n") % docket.tip_rev)
2140 2141
2141 2142
2142 2143 @command(
2143 2144 b'debugobsolete',
2144 2145 [
2145 2146 (b'', b'flags', 0, _(b'markers flag')),
2146 2147 (
2147 2148 b'',
2148 2149 b'record-parents',
2149 2150 False,
2150 2151 _(b'record parent information for the precursor'),
2151 2152 ),
2152 2153 (b'r', b'rev', [], _(b'display markers relevant to REV')),
2153 2154 (
2154 2155 b'',
2155 2156 b'exclusive',
2156 2157 False,
2157 2158 _(b'restrict display to markers only relevant to REV'),
2158 2159 ),
2159 2160 (b'', b'index', False, _(b'display index of the marker')),
2160 2161 (b'', b'delete', [], _(b'delete markers specified by indices')),
2161 2162 ]
2162 2163 + cmdutil.commitopts2
2163 2164 + cmdutil.formatteropts,
2164 2165 _(b'[OBSOLETED [REPLACEMENT ...]]'),
2165 2166 )
2166 2167 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
2167 2168 """create arbitrary obsolete marker
2168 2169
2169 2170 With no arguments, displays the list of obsolescence markers."""
2170 2171
2171 2172 opts = pycompat.byteskwargs(opts)
2172 2173
2173 2174 def parsenodeid(s):
2174 2175 try:
2175 2176 # We do not use revsingle/revrange functions here to accept
2176 2177 # arbitrary node identifiers, possibly not present in the
2177 2178 # local repository.
2178 2179 n = bin(s)
2179 2180 if len(n) != len(nullid):
2180 2181 raise TypeError()
2181 2182 return n
2182 2183 except TypeError:
2183 2184 raise error.Abort(
2184 2185 b'changeset references must be full hexadecimal '
2185 2186 b'node identifiers'
2186 2187 )
2187 2188
2188 2189 if opts.get(b'delete'):
2189 2190 indices = []
2190 2191 for v in opts.get(b'delete'):
2191 2192 try:
2192 2193 indices.append(int(v))
2193 2194 except ValueError:
2194 2195 raise error.Abort(
2195 2196 _(b'invalid index value: %r') % v,
2196 2197 hint=_(b'use integers for indices'),
2197 2198 )
2198 2199
2199 2200 if repo.currenttransaction():
2200 2201 raise error.Abort(
2201 2202 _(b'cannot delete obsmarkers in the middle of a transaction.')
2202 2203 )
2203 2204
2204 2205 with repo.lock():
2205 2206 n = repair.deleteobsmarkers(repo.obsstore, indices)
2206 2207 ui.write(_(b'deleted %i obsolescence markers\n') % n)
2207 2208
2208 2209 return
2209 2210
2210 2211 if precursor is not None:
2211 2212 if opts[b'rev']:
2212 2213 raise error.Abort(b'cannot select revision when creating marker')
2213 2214 metadata = {}
2214 2215 metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
2215 2216 succs = tuple(parsenodeid(succ) for succ in successors)
2216 2217 l = repo.lock()
2217 2218 try:
2218 2219 tr = repo.transaction(b'debugobsolete')
2219 2220 try:
2220 2221 date = opts.get(b'date')
2221 2222 if date:
2222 2223 date = dateutil.parsedate(date)
2223 2224 else:
2224 2225 date = None
2225 2226 prec = parsenodeid(precursor)
2226 2227 parents = None
2227 2228 if opts[b'record_parents']:
2228 2229 if prec not in repo.unfiltered():
2229 2230 raise error.Abort(
2230 2231 b'cannot use --record-parents on '
2231 2232 b'unknown changesets'
2232 2233 )
2233 2234 parents = repo.unfiltered()[prec].parents()
2234 2235 parents = tuple(p.node() for p in parents)
2235 2236 repo.obsstore.create(
2236 2237 tr,
2237 2238 prec,
2238 2239 succs,
2239 2240 opts[b'flags'],
2240 2241 parents=parents,
2241 2242 date=date,
2242 2243 metadata=metadata,
2243 2244 ui=ui,
2244 2245 )
2245 2246 tr.close()
2246 2247 except ValueError as exc:
2247 2248 raise error.Abort(
2248 2249 _(b'bad obsmarker input: %s') % pycompat.bytestr(exc)
2249 2250 )
2250 2251 finally:
2251 2252 tr.release()
2252 2253 finally:
2253 2254 l.release()
2254 2255 else:
2255 2256 if opts[b'rev']:
2256 2257 revs = scmutil.revrange(repo, opts[b'rev'])
2257 2258 nodes = [repo[r].node() for r in revs]
2258 2259 markers = list(
2259 2260 obsutil.getmarkers(
2260 2261 repo, nodes=nodes, exclusive=opts[b'exclusive']
2261 2262 )
2262 2263 )
2263 2264 markers.sort(key=lambda x: x._data)
2264 2265 else:
2265 2266 markers = obsutil.getmarkers(repo)
2266 2267
2267 2268 markerstoiter = markers
2268 2269 isrelevant = lambda m: True
2269 2270 if opts.get(b'rev') and opts.get(b'index'):
2270 2271 markerstoiter = obsutil.getmarkers(repo)
2271 2272 markerset = set(markers)
2272 2273 isrelevant = lambda m: m in markerset
2273 2274
2274 2275 fm = ui.formatter(b'debugobsolete', opts)
2275 2276 for i, m in enumerate(markerstoiter):
2276 2277 if not isrelevant(m):
2277 2278 # marker can be irrelevant when we're iterating over a set
2278 2279 # of markers (markerstoiter) which is bigger than the set
2279 2280 # of markers we want to display (markers)
2280 2281 # this can happen if both --index and --rev options are
2281 2282 # provided and thus we need to iterate over all of the markers
2282 2283 # to get the correct indices, but only display the ones that
2283 2284 # are relevant to --rev value
2284 2285 continue
2285 2286 fm.startitem()
2286 2287 ind = i if opts.get(b'index') else None
2287 2288 cmdutil.showmarker(fm, m, index=ind)
2288 2289 fm.end()
2289 2290
2290 2291
2291 2292 @command(
2292 2293 b'debugp1copies',
2293 2294 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2294 2295 _(b'[-r REV]'),
2295 2296 )
2296 2297 def debugp1copies(ui, repo, **opts):
2297 2298 """dump copy information compared to p1"""
2298 2299
2299 2300 opts = pycompat.byteskwargs(opts)
2300 2301 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2301 2302 for dst, src in ctx.p1copies().items():
2302 2303 ui.write(b'%s -> %s\n' % (src, dst))
2303 2304
2304 2305
2305 2306 @command(
2306 2307 b'debugp2copies',
2307 2308 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2308 2309 _(b'[-r REV]'),
2309 2310 )
2310 2311 def debugp2copies(ui, repo, **opts):
2311 2312 """dump copy information compared to p2"""
2312 2313
2313 2314 opts = pycompat.byteskwargs(opts)
2314 2315 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2315 2316 for dst, src in ctx.p2copies().items():
2316 2317 ui.write(b'%s -> %s\n' % (src, dst))
2317 2318
2318 2319
2319 2320 @command(
2320 2321 b'debugpathcomplete',
2321 2322 [
2322 2323 (b'f', b'full', None, _(b'complete an entire path')),
2323 2324 (b'n', b'normal', None, _(b'show only normal files')),
2324 2325 (b'a', b'added', None, _(b'show only added files')),
2325 2326 (b'r', b'removed', None, _(b'show only removed files')),
2326 2327 ],
2327 2328 _(b'FILESPEC...'),
2328 2329 )
2329 2330 def debugpathcomplete(ui, repo, *specs, **opts):
2330 2331 '''complete part or all of a tracked path
2331 2332
2332 2333 This command supports shells that offer path name completion. It
2333 2334 currently completes only files already known to the dirstate.
2334 2335
2335 2336 Completion extends only to the next path segment unless
2336 2337 --full is specified, in which case entire paths are used.'''
2337 2338
2338 2339 def complete(path, acceptable):
2339 2340 dirstate = repo.dirstate
2340 2341 spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
2341 2342 rootdir = repo.root + pycompat.ossep
2342 2343 if spec != repo.root and not spec.startswith(rootdir):
2343 2344 return [], []
2344 2345 if os.path.isdir(spec):
2345 2346 spec += b'/'
2346 2347 spec = spec[len(rootdir) :]
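# dirstate paths always use '/' as separator; normalize the spec here and
# convert matches back to the OS separator before returning them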
2347 2348 fixpaths = pycompat.ossep != b'/'
2348 2349 if fixpaths:
2349 2350 spec = spec.replace(pycompat.ossep, b'/')
2350 2351 speclen = len(spec)
2351 2352 fullpaths = opts['full']
2352 2353 files, dirs = set(), set()
2353 2354 adddir, addfile = dirs.add, files.add
2354 2355 for f, st in pycompat.iteritems(dirstate):
2355 2356 if f.startswith(spec) and st[0] in acceptable:
2356 2357 if fixpaths:
2357 2358 f = f.replace(b'/', pycompat.ossep)
2358 2359 if fullpaths:
2359 2360 addfile(f)
2360 2361 continue
2361 2362 s = f.find(pycompat.ossep, speclen)
2362 2363 if s >= 0:
2363 2364 adddir(f[:s])
2364 2365 else:
2365 2366 addfile(f)
2366 2367 return files, dirs
2367 2368
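# dirstate states: 'n' = normal, 'm' = merged, 'a' = added, 'r' = removed;
# with no filtering option every state ('nmar') is accepted below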
2368 2369 acceptable = b''
2369 2370 if opts['normal']:
2370 2371 acceptable += b'nm'
2371 2372 if opts['added']:
2372 2373 acceptable += b'a'
2373 2374 if opts['removed']:
2374 2375 acceptable += b'r'
2375 2376 cwd = repo.getcwd()
2376 2377 if not specs:
2377 2378 specs = [b'.']
2378 2379
2379 2380 files, dirs = set(), set()
2380 2381 for spec in specs:
2381 2382 f, d = complete(spec, acceptable or b'nmar')
2382 2383 files.update(f)
2383 2384 dirs.update(d)
2384 2385 files.update(dirs)
2385 2386 ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
2386 2387 ui.write(b'\n')
2387 2388
2388 2389
2389 2390 @command(
2390 2391 b'debugpathcopies',
2391 2392 cmdutil.walkopts,
2392 2393 b'hg debugpathcopies REV1 REV2 [FILE]',
2393 2394 inferrepo=True,
2394 2395 )
2395 2396 def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
2396 2397 """show copies between two revisions"""
2397 2398 ctx1 = scmutil.revsingle(repo, rev1)
2398 2399 ctx2 = scmutil.revsingle(repo, rev2)
2399 2400 m = scmutil.match(ctx1, pats, opts)
2400 2401 for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
2401 2402 ui.write(b'%s -> %s\n' % (src, dst))
2402 2403
2403 2404
2404 2405 @command(b'debugpeer', [], _(b'PATH'), norepo=True)
2405 2406 def debugpeer(ui, path):
2406 2407 """establish a connection to a peer repository"""
2407 2408 # Always enable peer request logging. Requires --debug to display
2408 2409 # though.
2409 2410 overrides = {
2410 2411 (b'devel', b'debug.peer-request'): True,
2411 2412 }
2412 2413
2413 2414 with ui.configoverride(overrides):
2414 2415 peer = hg.peer(ui, {}, path)
2415 2416
2416 2417 local = peer.local() is not None
2417 2418 canpush = peer.canpush()
2418 2419
2419 2420 ui.write(_(b'url: %s\n') % peer.url())
2420 2421 ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
2421 2422 ui.write(_(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no')))
2422 2423
2423 2424
2424 2425 @command(
2425 2426 b'debugpickmergetool',
2426 2427 [
2427 2428 (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
2428 2429 (b'', b'changedelete', None, _(b'emulate merging change and delete')),
2429 2430 ]
2430 2431 + cmdutil.walkopts
2431 2432 + cmdutil.mergetoolopts,
2432 2433 _(b'[PATTERN]...'),
2433 2434 inferrepo=True,
2434 2435 )
2435 2436 def debugpickmergetool(ui, repo, *pats, **opts):
2436 2437 """examine which merge tool is chosen for specified file
2437 2438
2438 2439 As described in :hg:`help merge-tools`, Mercurial examines
2439 2440 the configurations below in this order to decide which merge tool is
2440 2441 chosen for each specified file.
2441 2442
2442 2443 1. ``--tool`` option
2443 2444 2. ``HGMERGE`` environment variable
2444 2445 3. configurations in ``merge-patterns`` section
2445 2446 4. configuration of ``ui.merge``
2446 2447 5. configurations in ``merge-tools`` section
2447 2448 6. ``hgmerge`` tool (for historical reason only)
2448 2449 7. default tool for fallback (``:merge`` or ``:prompt``)
2449 2450
2450 2451 This command writes out the examination result in the style below::
2451 2452
2452 2453 FILE = MERGETOOL
2453 2454
2454 2455 By default, all files known in the first parent context of the
2455 2456 working directory are examined. Use file patterns and/or -I/-X
2456 2457 options to limit target files. -r/--rev is also useful to examine
2457 2458 files in another context without actually updating to it.
2458 2459
2459 2460 With --debug, this command shows warning messages while matching
2460 2461 against ``merge-patterns`` and so on, too. It is recommended to
2461 2462 use this option with explicit file patterns and/or -I/-X options,
2462 2463 because this option increases the amount of output per file according
2463 2464 to configurations in hgrc.
2464 2465
2465 2466 With -v/--verbose, this command first shows the configurations below
2466 2467 (only if they are specified).
2467 2468
2468 2469 - ``--tool`` option
2469 2470 - ``HGMERGE`` environment variable
2470 2471 - configuration of ``ui.merge``
2471 2472
2472 2473 If a merge tool is chosen before matching against
2473 2474 ``merge-patterns``, this command can't show any helpful
2474 2475 information, even with --debug. In such a case, the information above
2475 2476 is useful to understand why a merge tool is chosen.
2476 2477 """
2477 2478 opts = pycompat.byteskwargs(opts)
2478 2479 overrides = {}
2479 2480 if opts[b'tool']:
2480 2481 overrides[(b'ui', b'forcemerge')] = opts[b'tool']
2481 2482 ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))
2482 2483
2483 2484 with ui.configoverride(overrides, b'debugmergepatterns'):
2484 2485 hgmerge = encoding.environ.get(b"HGMERGE")
2485 2486 if hgmerge is not None:
2486 2487 ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
2487 2488 uimerge = ui.config(b"ui", b"merge")
2488 2489 if uimerge:
2489 2490 ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))
2490 2491
2491 2492 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2492 2493 m = scmutil.match(ctx, pats, opts)
2493 2494 changedelete = opts[b'changedelete']
2494 2495 for path in ctx.walk(m):
2495 2496 fctx = ctx[path]
2496 2497 try:
2497 2498 if not ui.debugflag:
2498 2499 ui.pushbuffer(error=True)
2499 2500 tool, toolpath = filemerge._picktool(
2500 2501 repo,
2501 2502 ui,
2502 2503 path,
2503 2504 fctx.isbinary(),
2504 2505 b'l' in fctx.flags(),
2505 2506 changedelete,
2506 2507 )
2507 2508 finally:
2508 2509 if not ui.debugflag:
2509 2510 ui.popbuffer()
2510 2511 ui.write(b'%s = %s\n' % (path, tool))
2511 2512
2512 2513
2513 2514 @command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
2514 2515 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
2515 2516 '''access the pushkey key/value protocol
2516 2517
2517 2518 With two args, list the keys in the given namespace.
2518 2519
2519 2520 With five args, set a key to new if it currently is set to old.
2520 2521 Reports success or failure.
2521 2522 '''
2522 2523
2523 2524 target = hg.peer(ui, {}, repopath)
2524 2525 if keyinfo:
2525 2526 key, old, new = keyinfo
2526 2527 with target.commandexecutor() as e:
2527 2528 r = e.callcommand(
2528 2529 b'pushkey',
2529 2530 {
2530 2531 b'namespace': namespace,
2531 2532 b'key': key,
2532 2533 b'old': old,
2533 2534 b'new': new,
2534 2535 },
2535 2536 ).result()
2536 2537
2537 2538 ui.status(pycompat.bytestr(r) + b'\n')
2538 2539 return not r
2539 2540 else:
2540 2541 for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
2541 2542 ui.write(
2542 2543 b"%s\t%s\n" % (stringutil.escapestr(k), stringutil.escapestr(v))
2543 2544 )
2544 2545
2545 2546
2546 2547 @command(b'debugpvec', [], _(b'A B'))
2547 2548 def debugpvec(ui, repo, a, b=None):
2548 2549 ca = scmutil.revsingle(repo, a)
2549 2550 cb = scmutil.revsingle(repo, b)
2550 2551 pa = pvec.ctxpvec(ca)
2551 2552 pb = pvec.ctxpvec(cb)
2552 2553 if pa == pb:
2553 2554 rel = b"="
2554 2555 elif pa > pb:
2555 2556 rel = b">"
2556 2557 elif pa < pb:
2557 2558 rel = b"<"
2558 2559 elif pa | pb:
2559 2560 rel = b"|"
2560 2561 ui.write(_(b"a: %s\n") % pa)
2561 2562 ui.write(_(b"b: %s\n") % pb)
2562 2563 ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
2563 2564 ui.write(
2564 2565 _(b"delta: %d hdist: %d distance: %d relation: %s\n")
2565 2566 % (
2566 2567 abs(pa._depth - pb._depth),
2567 2568 pvec._hamming(pa._vec, pb._vec),
2568 2569 pa.distance(pb),
2569 2570 rel,
2570 2571 )
2571 2572 )
2572 2573
2573 2574
2574 2575 @command(
2575 2576 b'debugrebuilddirstate|debugrebuildstate',
2576 2577 [
2577 2578 (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
2578 2579 (
2579 2580 b'',
2580 2581 b'minimal',
2581 2582 None,
2582 2583 _(
2583 2584 b'only rebuild files that are inconsistent with '
2584 2585 b'the working copy parent'
2585 2586 ),
2586 2587 ),
2587 2588 ],
2588 2589 _(b'[-r REV]'),
2589 2590 )
2590 2591 def debugrebuilddirstate(ui, repo, rev, **opts):
2591 2592 """rebuild the dirstate as it would look like for the given revision
2592 2593
2593 2594 If no revision is specified, the first parent of the working directory will be used.
2594 2595
2595 2596 The dirstate will be set to the files of the given revision.
2596 2597 The actual working directory content or existing dirstate
2597 2598 information such as adds or removes is not considered.
2598 2599
2599 2600 ``minimal`` will only rebuild the dirstate status for files that claim to be
2600 2601 tracked but are not in the parent manifest, or that exist in the parent
2601 2602 manifest but are not in the dirstate. It will not change adds, removes, or
2602 2603 modified files that are in the working copy parent.
2603 2604
2604 2605 One use of this command is to make the next :hg:`status` invocation
2605 2606 check the actual file content.
2606 2607 """
2607 2608 ctx = scmutil.revsingle(repo, rev)
2608 2609 with repo.wlock():
2609 2610 dirstate = repo.dirstate
2610 2611 changedfiles = None
2611 2612 # See command doc for what minimal does.
2612 2613 if opts.get('minimal'):
2613 2614 manifestfiles = set(ctx.manifest().keys())
2614 2615 dirstatefiles = set(dirstate)
2615 2616 manifestonly = manifestfiles - dirstatefiles
2616 2617 dsonly = dirstatefiles - manifestfiles
2617 2618 dsnotadded = set(f for f in dsonly if dirstate[f] != b'a')
2618 2619 changedfiles = manifestonly | dsnotadded
2619 2620
2620 2621 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2621 2622
2622 2623
2623 2624 @command(b'debugrebuildfncache', [], b'')
2624 2625 def debugrebuildfncache(ui, repo):
2625 2626 """rebuild the fncache file"""
2626 2627 repair.rebuildfncache(ui, repo)
2627 2628
2628 2629
2629 2630 @command(
2630 2631 b'debugrename',
2631 2632 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2632 2633 _(b'[-r REV] [FILE]...'),
2633 2634 )
2634 2635 def debugrename(ui, repo, *pats, **opts):
2635 2636 """dump rename information"""
2636 2637
2637 2638 opts = pycompat.byteskwargs(opts)
2638 2639 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2639 2640 m = scmutil.match(ctx, pats, opts)
2640 2641 for abs in ctx.walk(m):
2641 2642 fctx = ctx[abs]
2642 2643 o = fctx.filelog().renamed(fctx.filenode())
2643 2644 rel = repo.pathto(abs)
2644 2645 if o:
2645 2646 ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
2646 2647 else:
2647 2648 ui.write(_(b"%s not renamed\n") % rel)
2648 2649
2649 2650
2650 2651 @command(
2651 2652 b'debugrevlog',
2652 2653 cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
2653 2654 _(b'-c|-m|FILE'),
2654 2655 optionalrepo=True,
2655 2656 )
2656 2657 def debugrevlog(ui, repo, file_=None, **opts):
2657 2658 """show data and statistics about a revlog"""
2658 2659 opts = pycompat.byteskwargs(opts)
2659 2660 r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
2660 2661
2661 2662 if opts.get(b"dump"):
2662 2663 numrevs = len(r)
2663 2664 ui.write(
2664 2665 (
2665 2666 b"# rev p1rev p2rev start end deltastart base p1 p2"
2666 2667 b" rawsize totalsize compression heads chainlen\n"
2667 2668 )
2668 2669 )
2669 2670 ts = 0
2670 2671 heads = set()
2671 2672
2672 2673 for rev in pycompat.xrange(numrevs):
2673 2674 dbase = r.deltaparent(rev)
2674 2675 if dbase == -1:
2675 2676 dbase = rev
2676 2677 cbase = r.chainbase(rev)
2677 2678 clen = r.chainlen(rev)
2678 2679 p1, p2 = r.parentrevs(rev)
2679 2680 rs = r.rawsize(rev)
2680 2681 ts = ts + rs
2681 2682 heads -= set(r.parentrevs(rev))
2682 2683 heads.add(rev)
2683 2684 try:
2684 2685 compression = ts / r.end(rev)
2685 2686 except ZeroDivisionError:
2686 2687 compression = 0
2687 2688 ui.write(
2688 2689 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
2689 2690 b"%11d %5d %8d\n"
2690 2691 % (
2691 2692 rev,
2692 2693 p1,
2693 2694 p2,
2694 2695 r.start(rev),
2695 2696 r.end(rev),
2696 2697 r.start(dbase),
2697 2698 r.start(cbase),
2698 2699 r.start(p1),
2699 2700 r.start(p2),
2700 2701 rs,
2701 2702 ts,
2702 2703 compression,
2703 2704 len(heads),
2704 2705 clen,
2705 2706 )
2706 2707 )
2707 2708 return 0
2708 2709
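# the low 16 bits of the version field hold the revlog format number; the
# remaining bits hold feature flags such as inline data and generaldelta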
2709 2710 v = r.version
2710 2711 format = v & 0xFFFF
2711 2712 flags = []
2712 2713 gdelta = False
2713 2714 if v & revlog.FLAG_INLINE_DATA:
2714 2715 flags.append(b'inline')
2715 2716 if v & revlog.FLAG_GENERALDELTA:
2716 2717 gdelta = True
2717 2718 flags.append(b'generaldelta')
2718 2719 if not flags:
2719 2720 flags = [b'(none)']
2720 2721
2721 2722 ### tracks merge vs single parent
2722 2723 nummerges = 0
2723 2724
2724 2725 ### tracks ways the "delta" are build
2725 2726 # nodelta
2726 2727 numempty = 0
2727 2728 numemptytext = 0
2728 2729 numemptydelta = 0
2729 2730 # full file content
2730 2731 numfull = 0
2731 2732 # intermediate snapshot against a prior snapshot
2732 2733 numsemi = 0
2733 2734 # snapshot count per depth
2734 2735 numsnapdepth = collections.defaultdict(lambda: 0)
2735 2736 # delta against previous revision
2736 2737 numprev = 0
2737 2738 # delta against first or second parent (not prev)
2738 2739 nump1 = 0
2739 2740 nump2 = 0
2740 2741 # delta against neither prev nor parents
2741 2742 numother = 0
2742 2743 # delta against prev that are also first or second parent
2743 2744 # (details of `numprev`)
2744 2745 nump1prev = 0
2745 2746 nump2prev = 0
2746 2747
2747 2748 # data about delta chain of each revs
2748 2749 chainlengths = []
2749 2750 chainbases = []
2750 2751 chainspans = []
2751 2752
2752 2753 # data about each revision
2753 2754 datasize = [None, 0, 0]
2754 2755 fullsize = [None, 0, 0]
2755 2756 semisize = [None, 0, 0]
2756 2757 # snapshot count per depth
2757 2758 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
2758 2759 deltasize = [None, 0, 0]
2759 2760 chunktypecounts = {}
2760 2761 chunktypesizes = {}
2761 2762
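# each size list above holds [min, max, total]; addsize() updates one in place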
2762 2763 def addsize(size, l):
2763 2764 if l[0] is None or size < l[0]:
2764 2765 l[0] = size
2765 2766 if size > l[1]:
2766 2767 l[1] = size
2767 2768 l[2] += size
2768 2769
2769 2770 numrevs = len(r)
2770 2771 for rev in pycompat.xrange(numrevs):
2771 2772 p1, p2 = r.parentrevs(rev)
2772 2773 delta = r.deltaparent(rev)
2773 2774 if format > 0:
2774 2775 addsize(r.rawsize(rev), datasize)
2775 2776 if p2 != nullrev:
2776 2777 nummerges += 1
2777 2778 size = r.length(rev)
2778 2779 if delta == nullrev:
2779 2780 chainlengths.append(0)
2780 2781 chainbases.append(r.start(rev))
2781 2782 chainspans.append(size)
2782 2783 if size == 0:
2783 2784 numempty += 1
2784 2785 numemptytext += 1
2785 2786 else:
2786 2787 numfull += 1
2787 2788 numsnapdepth[0] += 1
2788 2789 addsize(size, fullsize)
2789 2790 addsize(size, snapsizedepth[0])
2790 2791 else:
2791 2792 chainlengths.append(chainlengths[delta] + 1)
2792 2793 baseaddr = chainbases[delta]
2793 2794 revaddr = r.start(rev)
2794 2795 chainbases.append(baseaddr)
2795 2796 chainspans.append((revaddr - baseaddr) + size)
2796 2797 if size == 0:
2797 2798 numempty += 1
2798 2799 numemptydelta += 1
2799 2800 elif r.issnapshot(rev):
2800 2801 addsize(size, semisize)
2801 2802 numsemi += 1
2802 2803 depth = r.snapshotdepth(rev)
2803 2804 numsnapdepth[depth] += 1
2804 2805 addsize(size, snapsizedepth[depth])
2805 2806 else:
2806 2807 addsize(size, deltasize)
2807 2808 if delta == rev - 1:
2808 2809 numprev += 1
2809 2810 if delta == p1:
2810 2811 nump1prev += 1
2811 2812 elif delta == p2:
2812 2813 nump2prev += 1
2813 2814 elif delta == p1:
2814 2815 nump1 += 1
2815 2816 elif delta == p2:
2816 2817 nump2 += 1
2817 2818 elif delta != nullrev:
2818 2819 numother += 1
2819 2820
2820 2821 # Obtain data on the raw chunks in the revlog.
2821 2822 if util.safehasattr(r, b'_getsegmentforrevs'):
2822 2823 segment = r._getsegmentforrevs(rev, rev)[1]
2823 2824 else:
2824 2825 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
2825 2826 if segment:
2826 2827 chunktype = bytes(segment[0:1])
2827 2828 else:
2828 2829 chunktype = b'empty'
2829 2830
2830 2831 if chunktype not in chunktypecounts:
2831 2832 chunktypecounts[chunktype] = 0
2832 2833 chunktypesizes[chunktype] = 0
2833 2834
2834 2835 chunktypecounts[chunktype] += 1
2835 2836 chunktypesizes[chunktype] += size
2836 2837
2837 2838 # Adjust size min value for empty cases
2838 2839 for size in (datasize, fullsize, semisize, deltasize):
2839 2840 if size[0] is None:
2840 2841 size[0] = 0
2841 2842
2842 2843 numdeltas = numrevs - numfull - numempty - numsemi
2843 2844 numoprev = numprev - nump1prev - nump2prev
2844 2845 totalrawsize = datasize[2]
2845 2846 datasize[2] /= numrevs
2846 2847 fulltotal = fullsize[2]
2847 2848 if numfull == 0:
2848 2849 fullsize[2] = 0
2849 2850 else:
2850 2851 fullsize[2] /= numfull
2851 2852 semitotal = semisize[2]
2852 2853 snaptotal = {}
2853 2854 if numsemi > 0:
2854 2855 semisize[2] /= numsemi
2855 2856 for depth in snapsizedepth:
2856 2857 snaptotal[depth] = snapsizedepth[depth][2]
2857 2858 snapsizedepth[depth][2] /= numsnapdepth[depth]
2858 2859
2859 2860 deltatotal = deltasize[2]
2860 2861 if numdeltas > 0:
2861 2862 deltasize[2] /= numdeltas
2862 2863 totalsize = fulltotal + semitotal + deltatotal
2863 2864 avgchainlen = sum(chainlengths) / numrevs
2864 2865 maxchainlen = max(chainlengths)
2865 2866 maxchainspan = max(chainspans)
2866 2867 compratio = 1
2867 2868 if totalsize:
2868 2869 compratio = totalrawsize / totalsize
2869 2870
2870 2871 basedfmtstr = b'%%%dd\n'
2871 2872 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
2872 2873
2873 2874 def dfmtstr(max):
2874 2875 return basedfmtstr % len(str(max))
2875 2876
2876 2877 def pcfmtstr(max, padding=0):
2877 2878 return basepcfmtstr % (len(str(max)), b' ' * padding)
2878 2879
2879 2880 def pcfmt(value, total):
2880 2881 if total:
2881 2882 return (value, 100 * float(value) / total)
2882 2883 else:
2883 2884 return value, 100.0
2884 2885
2885 2886 ui.writenoi18n(b'format : %d\n' % format)
2886 2887 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
2887 2888
2888 2889 ui.write(b'\n')
2889 2890 fmt = pcfmtstr(totalsize)
2890 2891 fmt2 = dfmtstr(totalsize)
2891 2892 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
2892 2893 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
2893 2894 ui.writenoi18n(
2894 2895 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
2895 2896 )
2896 2897 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
2897 2898 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
2898 2899 ui.writenoi18n(
2899 2900 b' text : '
2900 2901 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
2901 2902 )
2902 2903 ui.writenoi18n(
2903 2904 b' delta : '
2904 2905 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
2905 2906 )
2906 2907 ui.writenoi18n(
2907 2908 b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
2908 2909 )
2909 2910 for depth in sorted(numsnapdepth):
2910 2911 ui.write(
2911 2912 (b' lvl-%-3d : ' % depth)
2912 2913 + fmt % pcfmt(numsnapdepth[depth], numrevs)
2913 2914 )
2914 2915 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
2915 2916 ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
2916 2917 ui.writenoi18n(
2917 2918 b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
2918 2919 )
2919 2920 for depth in sorted(numsnapdepth):
2920 2921 ui.write(
2921 2922 (b' lvl-%-3d : ' % depth)
2922 2923 + fmt % pcfmt(snaptotal[depth], totalsize)
2923 2924 )
2924 2925 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
2925 2926
2926 2927 def fmtchunktype(chunktype):
2927 2928 if chunktype == b'empty':
2928 2929 return b' %s : ' % chunktype
2929 2930 elif chunktype in pycompat.bytestr(string.ascii_letters):
2930 2931 return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
2931 2932 else:
2932 2933 return b' 0x%s : ' % hex(chunktype)
2933 2934
2934 2935 ui.write(b'\n')
2935 2936 ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
2936 2937 for chunktype in sorted(chunktypecounts):
2937 2938 ui.write(fmtchunktype(chunktype))
2938 2939 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
2939 2940 ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
2940 2941 for chunktype in sorted(chunktypecounts):
2941 2942 ui.write(fmtchunktype(chunktype))
2942 2943 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
2943 2944
2944 2945 ui.write(b'\n')
2945 2946 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
2946 2947 ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
2947 2948 ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
2948 2949 ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
2949 2950 ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
2950 2951
2951 2952 if format > 0:
2952 2953 ui.write(b'\n')
2953 2954 ui.writenoi18n(
2954 2955 b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
2955 2956 % tuple(datasize)
2956 2957 )
2957 2958 ui.writenoi18n(
2958 2959 b'full revision size (min/max/avg) : %d / %d / %d\n'
2959 2960 % tuple(fullsize)
2960 2961 )
2961 2962 ui.writenoi18n(
2962 2963 b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
2963 2964 % tuple(semisize)
2964 2965 )
2965 2966 for depth in sorted(snapsizedepth):
2966 2967 if depth == 0:
2967 2968 continue
2968 2969 ui.writenoi18n(
2969 2970 b' level-%-3d (min/max/avg) : %d / %d / %d\n'
2970 2971 % ((depth,) + tuple(snapsizedepth[depth]))
2971 2972 )
2972 2973 ui.writenoi18n(
2973 2974 b'delta size (min/max/avg) : %d / %d / %d\n'
2974 2975 % tuple(deltasize)
2975 2976 )
2976 2977
2977 2978 if numdeltas > 0:
2978 2979 ui.write(b'\n')
2979 2980 fmt = pcfmtstr(numdeltas)
2980 2981 fmt2 = pcfmtstr(numdeltas, 4)
2981 2982 ui.writenoi18n(
2982 2983 b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
2983 2984 )
2984 2985 if numprev > 0:
2985 2986 ui.writenoi18n(
2986 2987 b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
2987 2988 )
2988 2989 ui.writenoi18n(
2989 2990 b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
2990 2991 )
2991 2992 ui.writenoi18n(
2992 2993 b' other : ' + fmt2 % pcfmt(numoprev, numprev)
2993 2994 )
2994 2995 if gdelta:
2995 2996 ui.writenoi18n(
2996 2997 b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
2997 2998 )
2998 2999 ui.writenoi18n(
2999 3000 b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
3000 3001 )
3001 3002 ui.writenoi18n(
3002 3003 b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
3003 3004 )
3004 3005
3005 3006
3006 3007 @command(
3007 3008 b'debugrevlogindex',
3008 3009 cmdutil.debugrevlogopts
3009 3010 + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
3010 3011 _(b'[-f FORMAT] -c|-m|FILE'),
3011 3012 optionalrepo=True,
3012 3013 )
3013 3014 def debugrevlogindex(ui, repo, file_=None, **opts):
3014 3015 """dump the contents of a revlog index"""
3015 3016 opts = pycompat.byteskwargs(opts)
3016 3017 r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
3017 3018 format = opts.get(b'format', 0)
3018 3019 if format not in (0, 1):
3019 3020 raise error.Abort(_(b"unknown format %d") % format)
3020 3021
3021 3022 if ui.debugflag:
3022 3023 shortfn = hex
3023 3024 else:
3024 3025 shortfn = short
3025 3026
3026 3027 # There might not be anything in r, so have a sane default
3027 3028 idlen = 12
3028 3029 for i in r:
3029 3030 idlen = len(shortfn(r.node(i)))
3030 3031 break
3031 3032
3032 3033 if format == 0:
3033 3034 if ui.verbose:
3034 3035 ui.writenoi18n(
3035 3036 b" rev offset length linkrev %s %s p2\n"
3036 3037 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3037 3038 )
3038 3039 else:
3039 3040 ui.writenoi18n(
3040 3041 b" rev linkrev %s %s p2\n"
3041 3042 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3042 3043 )
3043 3044 elif format == 1:
3044 3045 if ui.verbose:
3045 3046 ui.writenoi18n(
3046 3047 (
3047 3048 b" rev flag offset length size link p1"
3048 3049 b" p2 %s\n"
3049 3050 )
3050 3051 % b"nodeid".rjust(idlen)
3051 3052 )
3052 3053 else:
3053 3054 ui.writenoi18n(
3054 3055 b" rev flag size link p1 p2 %s\n"
3055 3056 % b"nodeid".rjust(idlen)
3056 3057 )
3057 3058
3058 3059 for i in r:
3059 3060 node = r.node(i)
3060 3061 if format == 0:
3061 3062 try:
3062 3063 pp = r.parents(node)
3063 3064 except Exception:
3064 3065 pp = [nullid, nullid]
3065 3066 if ui.verbose:
3066 3067 ui.write(
3067 3068 b"% 6d % 9d % 7d % 7d %s %s %s\n"
3068 3069 % (
3069 3070 i,
3070 3071 r.start(i),
3071 3072 r.length(i),
3072 3073 r.linkrev(i),
3073 3074 shortfn(node),
3074 3075 shortfn(pp[0]),
3075 3076 shortfn(pp[1]),
3076 3077 )
3077 3078 )
3078 3079 else:
3079 3080 ui.write(
3080 3081 b"% 6d % 7d %s %s %s\n"
3081 3082 % (
3082 3083 i,
3083 3084 r.linkrev(i),
3084 3085 shortfn(node),
3085 3086 shortfn(pp[0]),
3086 3087 shortfn(pp[1]),
3087 3088 )
3088 3089 )
3089 3090 elif format == 1:
3090 3091 pr = r.parentrevs(i)
3091 3092 if ui.verbose:
3092 3093 ui.write(
3093 3094 b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
3094 3095 % (
3095 3096 i,
3096 3097 r.flags(i),
3097 3098 r.start(i),
3098 3099 r.length(i),
3099 3100 r.rawsize(i),
3100 3101 r.linkrev(i),
3101 3102 pr[0],
3102 3103 pr[1],
3103 3104 shortfn(node),
3104 3105 )
3105 3106 )
3106 3107 else:
3107 3108 ui.write(
3108 3109 b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
3109 3110 % (
3110 3111 i,
3111 3112 r.flags(i),
3112 3113 r.rawsize(i),
3113 3114 r.linkrev(i),
3114 3115 pr[0],
3115 3116 pr[1],
3116 3117 shortfn(node),
3117 3118 )
3118 3119 )
3119 3120
3120 3121
3121 3122 @command(
3122 3123 b'debugrevspec',
3123 3124 [
3124 3125 (
3125 3126 b'',
3126 3127 b'optimize',
3127 3128 None,
3128 3129 _(b'print parsed tree after optimizing (DEPRECATED)'),
3129 3130 ),
3130 3131 (
3131 3132 b'',
3132 3133 b'show-revs',
3133 3134 True,
3134 3135 _(b'print list of result revisions (default)'),
3135 3136 ),
3136 3137 (
3137 3138 b's',
3138 3139 b'show-set',
3139 3140 None,
3140 3141 _(b'print internal representation of result set'),
3141 3142 ),
3142 3143 (
3143 3144 b'p',
3144 3145 b'show-stage',
3145 3146 [],
3146 3147 _(b'print parsed tree at the given stage'),
3147 3148 _(b'NAME'),
3148 3149 ),
3149 3150 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3150 3151 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3151 3152 ],
3152 3153 b'REVSPEC',
3153 3154 )
3154 3155 def debugrevspec(ui, repo, expr, **opts):
3155 3156 """parse and apply a revision specification
3156 3157
3157 3158 Use -p/--show-stage option to print the parsed tree at the given stages.
3158 3159 Use -p all to print tree at every stage.
3159 3160
3160 3161 Use --no-show-revs option with -s or -p to print only the set
3161 3162 representation or the parsed tree respectively.
3162 3163
3163 3164 Use --verify-optimized to compare the optimized result with the unoptimized
3164 3165 one. Returns 1 if the optimized result differs.
3165 3166 """
3166 3167 opts = pycompat.byteskwargs(opts)
3167 3168 aliases = ui.configitems(b'revsetalias')
3168 3169 stages = [
3169 3170 (b'parsed', lambda tree: tree),
3170 3171 (
3171 3172 b'expanded',
3172 3173 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3173 3174 ),
3174 3175 (b'concatenated', revsetlang.foldconcat),
3175 3176 (b'analyzed', revsetlang.analyze),
3176 3177 (b'optimized', revsetlang.optimize),
3177 3178 ]
3178 3179 if opts[b'no_optimized']:
3179 3180 stages = stages[:-1]
3180 3181 if opts[b'verify_optimized'] and opts[b'no_optimized']:
3181 3182 raise error.Abort(
3182 3183 _(b'cannot use --verify-optimized with --no-optimized')
3183 3184 )
3184 3185 stagenames = set(n for n, f in stages)
3185 3186
3186 3187 showalways = set()
3187 3188 showchanged = set()
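# stages in 'showalways' are printed unconditionally; stages in
# 'showchanged' are printed only when they modified the tree compared to the
# previously printed one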
3188 3189 if ui.verbose and not opts[b'show_stage']:
3189 3190 # show parsed tree by --verbose (deprecated)
3190 3191 showalways.add(b'parsed')
3191 3192 showchanged.update([b'expanded', b'concatenated'])
3192 3193 if opts[b'optimize']:
3193 3194 showalways.add(b'optimized')
3194 3195 if opts[b'show_stage'] and opts[b'optimize']:
3195 3196 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3196 3197 if opts[b'show_stage'] == [b'all']:
3197 3198 showalways.update(stagenames)
3198 3199 else:
3199 3200 for n in opts[b'show_stage']:
3200 3201 if n not in stagenames:
3201 3202 raise error.Abort(_(b'invalid stage name: %s') % n)
3202 3203 showalways.update(opts[b'show_stage'])
3203 3204
3204 3205 treebystage = {}
3205 3206 printedtree = None
3206 3207 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3207 3208 for n, f in stages:
3208 3209 treebystage[n] = tree = f(tree)
3209 3210 if n in showalways or (n in showchanged and tree != printedtree):
3210 3211 if opts[b'show_stage'] or n != b'parsed':
3211 3212 ui.write(b"* %s:\n" % n)
3212 3213 ui.write(revsetlang.prettyformat(tree), b"\n")
3213 3214 printedtree = tree
3214 3215
3215 3216 if opts[b'verify_optimized']:
3216 3217 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3217 3218 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3218 3219 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3219 3220 ui.writenoi18n(
3220 3221 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3221 3222 )
3222 3223 ui.writenoi18n(
3223 3224 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3224 3225 )
3225 3226 arevs = list(arevs)
3226 3227 brevs = list(brevs)
3227 3228 if arevs == brevs:
3228 3229 return 0
3229 3230 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3230 3231 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3231 3232 sm = difflib.SequenceMatcher(None, arevs, brevs)
3232 3233 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3233 3234 if tag in ('delete', 'replace'):
3234 3235 for c in arevs[alo:ahi]:
3235 3236 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3236 3237 if tag in ('insert', 'replace'):
3237 3238 for c in brevs[blo:bhi]:
3238 3239 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3239 3240 if tag == 'equal':
3240 3241 for c in arevs[alo:ahi]:
3241 3242 ui.write(b' %d\n' % c)
3242 3243 return 1
3243 3244
3244 3245 func = revset.makematcher(tree)
3245 3246 revs = func(repo)
3246 3247 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3247 3248 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3248 3249 if not opts[b'show_revs']:
3249 3250 return
3250 3251 for c in revs:
3251 3252 ui.write(b"%d\n" % c)
3252 3253
3253 3254
3254 3255 @command(
3255 3256 b'debugserve',
3256 3257 [
3257 3258 (
3258 3259 b'',
3259 3260 b'sshstdio',
3260 3261 False,
3261 3262 _(b'run an SSH server bound to process handles'),
3262 3263 ),
3263 3264 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3264 3265 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3265 3266 ],
3266 3267 b'',
3267 3268 )
3268 3269 def debugserve(ui, repo, **opts):
3269 3270 """run a server with advanced settings
3270 3271
3271 3272 This command is similar to :hg:`serve`. It exists partially as a
3272 3273 workaround to the fact that ``hg serve --stdio`` must have specific
3273 3274 arguments for security reasons.
3274 3275 """
3275 3276 opts = pycompat.byteskwargs(opts)
3276 3277
3277 3278 if not opts[b'sshstdio']:
3278 3279 raise error.Abort(_(b'only --sshstdio is currently supported'))
3279 3280
3280 3281 logfh = None
3281 3282
3282 3283 if opts[b'logiofd'] and opts[b'logiofile']:
3283 3284 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3284 3285
3285 3286 if opts[b'logiofd']:
3286 3287 # Ideally we would be line buffered. But line buffering in binary
3287 3288 # mode isn't supported and emits a warning in Python 3.8+. Disabling
3288 3289 # buffering could have performance impacts. But since this isn't
3289 3290 # performance critical code, it should be fine.
3290 3291 try:
3291 3292 logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
3292 3293 except OSError as e:
3293 3294 if e.errno != errno.ESPIPE:
3294 3295 raise
3295 3296 # can't seek a pipe, so `ab` mode fails on py3
3296 3297 logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
3297 3298 elif opts[b'logiofile']:
3298 3299 logfh = open(opts[b'logiofile'], b'ab', 0)
3299 3300
3300 3301 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3301 3302 s.serve_forever()
3302 3303
3303 3304
3304 3305 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3305 3306 def debugsetparents(ui, repo, rev1, rev2=None):
3306 3307 """manually set the parents of the current working directory
3307 3308
3308 3309 This is useful for writing repository conversion tools, but should
3309 3310 be used with care. For example, neither the working directory nor the
3310 3311 dirstate is updated, so file status may be incorrect after running this
3311 3312 command.
3312 3313
3313 3314 Returns 0 on success.
3314 3315 """
3315 3316
3316 3317 node1 = scmutil.revsingle(repo, rev1).node()
3317 3318 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3318 3319
3319 3320 with repo.wlock():
3320 3321 repo.setparents(node1, node2)
3321 3322
3322 3323
3323 3324 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3324 3325 def debugsidedata(ui, repo, file_, rev=None, **opts):
3325 3326 """dump the side data for a cl/manifest/file revision
3326 3327
3327 3328 Use --verbose to dump the sidedata content."""
3328 3329 opts = pycompat.byteskwargs(opts)
3329 3330 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
3330 3331 if rev is not None:
3331 3332 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3332 3333 file_, rev = None, file_
3333 3334 elif rev is None:
3334 3335 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3335 3336 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
3336 3337 r = getattr(r, '_revlog', r)
3337 3338 try:
3338 3339 sidedata = r.sidedata(r.lookup(rev))
3339 3340 except KeyError:
3340 3341 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3341 3342 if sidedata:
3342 3343 sidedata = list(sidedata.items())
3343 3344 sidedata.sort()
3344 3345 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3345 3346 for key, value in sidedata:
3346 3347 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3347 3348 if ui.verbose:
3348 3349 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3349 3350
3350 3351
3351 3352 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3352 3353 def debugssl(ui, repo, source=None, **opts):
3353 3354 '''test a secure connection to a server
3354 3355
3355 3356 This builds the certificate chain for the server on Windows, installing the
3356 3357 missing intermediates and trusted root via Windows Update if necessary. It
3357 3358 does nothing on other platforms.
3358 3359
3359 3360 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3360 3361 that server is used. See :hg:`help urls` for more information.
3361 3362
3362 3363 If the update succeeds, retry the original operation. Otherwise, the cause
3363 3364 of the SSL error is likely another issue.
3364 3365 '''
3365 3366 if not pycompat.iswindows:
3366 3367 raise error.Abort(
3367 3368 _(b'certificate chain building is only possible on Windows')
3368 3369 )
3369 3370
3370 3371 if not source:
3371 3372 if not repo:
3372 3373 raise error.Abort(
3373 3374 _(
3374 3375 b"there is no Mercurial repository here, and no "
3375 3376 b"server specified"
3376 3377 )
3377 3378 )
3378 3379 source = b"default"
3379 3380
3380 3381 source, branches = hg.parseurl(ui.expandpath(source))
3381 3382 url = util.url(source)
3382 3383
3383 3384 defaultport = {b'https': 443, b'ssh': 22}
3384 3385 if url.scheme in defaultport:
3385 3386 try:
3386 3387 addr = (url.host, int(url.port or defaultport[url.scheme]))
3387 3388 except ValueError:
3388 3389 raise error.Abort(_(b"malformed port number in URL"))
3389 3390 else:
3390 3391 raise error.Abort(_(b"only https and ssh connections are supported"))
3391 3392
3392 3393 from . import win32
3393 3394
3394 3395 s = ssl.wrap_socket(
3395 3396 socket.socket(),
3396 3397 ssl_version=ssl.PROTOCOL_TLS,
3397 3398 cert_reqs=ssl.CERT_NONE,
3398 3399 ca_certs=None,
3399 3400 )
3400 3401
3401 3402 try:
3402 3403 s.connect(addr)
3403 3404 cert = s.getpeercert(True)
3404 3405
3405 3406 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3406 3407
3407 3408 complete = win32.checkcertificatechain(cert, build=False)
3408 3409
3409 3410 if not complete:
3410 3411 ui.status(_(b'certificate chain is incomplete, updating... '))
3411 3412
3412 3413 if not win32.checkcertificatechain(cert):
3413 3414 ui.status(_(b'failed.\n'))
3414 3415 else:
3415 3416 ui.status(_(b'done.\n'))
3416 3417 else:
3417 3418 ui.status(_(b'full certificate chain is available\n'))
3418 3419 finally:
3419 3420 s.close()
3420 3421
3421 3422
3422 3423 @command(
3423 3424 b'debugsub',
3424 3425 [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
3425 3426 _(b'[-r REV] [REV]'),
3426 3427 )
3427 3428 def debugsub(ui, repo, rev=None):
3428 3429 ctx = scmutil.revsingle(repo, rev, None)
3429 3430 for k, v in sorted(ctx.substate.items()):
3430 3431 ui.writenoi18n(b'path %s\n' % k)
3431 3432 ui.writenoi18n(b' source %s\n' % v[0])
3432 3433 ui.writenoi18n(b' revision %s\n' % v[1])
3433 3434
3434 3435
3435 3436 @command(
3436 3437 b'debugsuccessorssets',
3437 3438 [(b'', b'closest', False, _(b'return closest successors sets only'))],
3438 3439 _(b'[REV]'),
3439 3440 )
3440 3441 def debugsuccessorssets(ui, repo, *revs, **opts):
3441 3442 """show set of successors for revision
3442 3443
3443 3444 A successors set of changeset A is a consistent group of revisions that
3444 3445 succeed A. It contains non-obsolete changesets only unless the closest
3445 3446 successors sets are requested.
3446 3447
3447 3448 In most cases a changeset A has a single successors set containing a single
3448 3449 successor (changeset A replaced by A').
3449 3450
3450 3451 A changeset that is made obsolete with no successors is called "pruned".
3451 3452 Such changesets have no successors sets at all.
3452 3453
3453 3454 A changeset that has been "split" will have a successors set containing
3454 3455 more than one successor.
3455 3456
3456 3457 A changeset that has been rewritten in multiple different ways is called
3457 3458 "divergent". Such changesets have multiple successor sets (each of which
3458 3459 may also be split, i.e. have multiple successors).
3459 3460
3460 3461 Results are displayed as follows::
3461 3462
3462 3463 <rev1>
3463 3464 <successors-1A>
3464 3465 <rev2>
3465 3466 <successors-2A>
3466 3467 <successors-2B1> <successors-2B2> <successors-2B3>
3467 3468
3468 3469 Here rev2 has two possible (i.e. divergent) successors sets. The first
3469 3470 holds one element, whereas the second holds three (i.e. the changeset has
3470 3471 been split).
3471 3472 """
3472 3473 # passed to successorssets caching computation from one call to another
3473 3474 cache = {}
3474 3475 ctx2str = bytes
3475 3476 node2str = short
3476 3477 for rev in scmutil.revrange(repo, revs):
3477 3478 ctx = repo[rev]
3478 3479 ui.write(b'%s\n' % ctx2str(ctx))
3479 3480 for succsset in obsutil.successorssets(
3480 3481 repo, ctx.node(), closest=opts['closest'], cache=cache
3481 3482 ):
3482 3483 if succsset:
3483 3484 ui.write(b' ')
3484 3485 ui.write(node2str(succsset[0]))
3485 3486 for node in succsset[1:]:
3486 3487 ui.write(b' ')
3487 3488 ui.write(node2str(node))
3488 3489 ui.write(b'\n')
3489 3490
3490 3491
3491 3492 @command(b'debugtagscache', [])
3492 3493 def debugtagscache(ui, repo):
3493 3494 """display the contents of .hg/cache/hgtagsfnodes1"""
3494 3495 cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
3495 3496 for r in repo:
3496 3497 node = repo[r].node()
3497 3498 tagsnode = cache.getfnode(node, computemissing=False)
3498 3499 tagsnodedisplay = hex(tagsnode) if tagsnode else b'missing/invalid'
3499 3500 ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))
3500 3501
3501 3502
3502 3503 @command(
3503 3504 b'debugtemplate',
3504 3505 [
3505 3506 (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
3506 3507 (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
3507 3508 ],
3508 3509 _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
3509 3510 optionalrepo=True,
3510 3511 )
3511 3512 def debugtemplate(ui, repo, tmpl, **opts):
3512 3513 """parse and apply a template
3513 3514
3514 3515 If -r/--rev is given, the template is processed as a log template and
3515 3516 applied to the given changesets. Otherwise, it is processed as a generic
3516 3517 template.
3517 3518
3518 3519 Use --verbose to print the parsed tree.
3519 3520 """
3520 3521 revs = None
3521 3522 if opts['rev']:
3522 3523 if repo is None:
3523 3524 raise error.RepoError(
3524 3525 _(b'there is no Mercurial repository here (.hg not found)')
3525 3526 )
3526 3527 revs = scmutil.revrange(repo, opts['rev'])
3527 3528
3528 3529 props = {}
3529 3530 for d in opts['define']:
3530 3531 try:
3531 3532 k, v = (e.strip() for e in d.split(b'=', 1))
3532 3533 if not k or k == b'ui':
3533 3534 raise ValueError
3534 3535 props[k] = v
3535 3536 except ValueError:
3536 3537 raise error.Abort(_(b'malformed keyword definition: %s') % d)
3537 3538
3538 3539 if ui.verbose:
3539 3540 aliases = ui.configitems(b'templatealias')
3540 3541 tree = templater.parse(tmpl)
3541 3542 ui.note(templater.prettyformat(tree), b'\n')
3542 3543 newtree = templater.expandaliases(tree, aliases)
3543 3544 if newtree != tree:
3544 3545 ui.notenoi18n(
3545 3546 b"* expanded:\n", templater.prettyformat(newtree), b'\n'
3546 3547 )
3547 3548
3548 3549 if revs is None:
3549 3550 tres = formatter.templateresources(ui, repo)
3550 3551 t = formatter.maketemplater(ui, tmpl, resources=tres)
3551 3552 if ui.verbose:
3552 3553 kwds, funcs = t.symbolsuseddefault()
3553 3554 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
3554 3555 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
3555 3556 ui.write(t.renderdefault(props))
3556 3557 else:
3557 3558 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
3558 3559 if ui.verbose:
3559 3560 kwds, funcs = displayer.t.symbolsuseddefault()
3560 3561 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
3561 3562 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
3562 3563 for r in revs:
3563 3564 displayer.show(repo[r], **pycompat.strkwargs(props))
3564 3565 displayer.close()
3565 3566
3566 3567
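(A rough illustration of the generic, non-log path above; a hedged sketch only. It assumes ``formatter.maketemplater`` may be called with just ``ui`` and the template, and the template and property names are invented.)::

    # sketch, not part of the original change
    t = formatter.maketemplater(ui, b'{key}\n')
    ui.write(t.renderdefault({b'key': b'value'}))  # would emit "value"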
3567 3568 @command(
3568 3569 b'debuguigetpass',
3569 3570 [(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),],
3570 3571 _(b'[-p TEXT]'),
3571 3572 norepo=True,
3572 3573 )
3573 3574 def debuguigetpass(ui, prompt=b''):
3574 3575 """show prompt to type password"""
3575 3576 r = ui.getpass(prompt)
3576 3577 ui.writenoi18n(b'response: %s\n' % r)
3577 3578
3578 3579
3579 3580 @command(
3580 3581 b'debuguiprompt',
3581 3582 [(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),],
3582 3583 _(b'[-p TEXT]'),
3583 3584 norepo=True,
3584 3585 )
3585 3586 def debuguiprompt(ui, prompt=b''):
3586 3587 """show plain prompt"""
3587 3588 r = ui.prompt(prompt)
3588 3589 ui.writenoi18n(b'response: %s\n' % r)
3589 3590
3590 3591
3591 3592 @command(b'debugupdatecaches', [])
3592 3593 def debugupdatecaches(ui, repo, *pats, **opts):
3593 3594 """warm all known caches in the repository"""
3594 3595 with repo.wlock(), repo.lock():
3595 3596 repo.updatecaches(full=True)
3596 3597
3597 3598
3598 3599 @command(
3599 3600 b'debugupgraderepo',
3600 3601 [
3601 3602 (
3602 3603 b'o',
3603 3604 b'optimize',
3604 3605 [],
3605 3606 _(b'extra optimization to perform'),
3606 3607 _(b'NAME'),
3607 3608 ),
3608 3609 (b'', b'run', False, _(b'performs an upgrade')),
3609 3610 (b'', b'backup', True, _(b'keep the old repository content around')),
3610 3611 (b'', b'changelog', None, _(b'select the changelog for upgrade')),
3611 3612 (b'', b'manifest', None, _(b'select the manifest for upgrade')),
3612 3613 ],
3613 3614 )
3614 3615 def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
3615 3616 """upgrade a repository to use different features
3616 3617
3617 3618 If no arguments are specified, the repository is evaluated for upgrade
3618 3619 and a list of problems and potential optimizations is printed.
3619 3620
3620 3621 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
3621 3622 can be influenced via additional arguments. More details will be provided
3622 3623 by the command output when run without ``--run``.
3623 3624
3624 3625 During the upgrade, the repository will be locked and no writes will be
3625 3626 allowed.
3626 3627
3627 3628 At the end of the upgrade, the repository may not be readable while new
3628 3629 repository data is swapped in. This window will be as long as it takes to
3629 3630 rename some directories inside the ``.hg`` directory. On most machines, this
3630 3631 should complete almost instantaneously and the chances of a consumer being
3631 3632 unable to access the repository should be low.
3632 3633
3633 3634 By default, all revlogs will be upgraded. You can restrict this using flags
3634 3635 such as `--manifest`:
3635 3636
3636 3637 * `--manifest`: only optimize the manifest
3637 3638 * `--no-manifest`: optimize all revlogs but the manifest
3638 3639 * `--changelog`: optimize the changelog only
3639 3640 * `--no-changelog --no-manifest`: optimize filelogs only
3640 3641 """
3641 3642 return upgrade.upgraderepo(
3642 3643 ui, repo, run=run, optimize=optimize, backup=backup, **opts
3643 3644 )
3644 3645
3645 3646
3646 3647 @command(
3647 3648 b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
3648 3649 )
3649 3650 def debugwalk(ui, repo, *pats, **opts):
3650 3651 """show how files match on given patterns"""
3651 3652 opts = pycompat.byteskwargs(opts)
3652 3653 m = scmutil.match(repo[None], pats, opts)
3653 3654 if ui.verbose:
3654 3655 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
3655 3656 items = list(repo[None].walk(m))
3656 3657 if not items:
3657 3658 return
3658 3659 f = lambda fn: fn
3659 3660 if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
3660 3661 f = lambda fn: util.normpath(fn)
3661 3662 fmt = b'f %%-%ds %%-%ds %%s' % (
3662 3663 max([len(abs) for abs in items]),
3663 3664 max([len(repo.pathto(abs)) for abs in items]),
3664 3665 )
3665 3666 for abs in items:
3666 3667 line = fmt % (
3667 3668 abs,
3668 3669 f(repo.pathto(abs)),
3669 3670 m.exact(abs) and b'exact' or b'',
3670 3671 )
3671 3672 ui.write(b"%s\n" % line.rstrip())
3672 3673
3673 3674
3674 3675 @command(b'debugwhyunstable', [], _(b'REV'))
3675 3676 def debugwhyunstable(ui, repo, rev):
3676 3677 """explain instabilities of a changeset"""
3677 3678 for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
3678 3679 dnodes = b''
3679 3680 if entry.get(b'divergentnodes'):
3680 3681 dnodes = (
3681 3682 b' '.join(
3682 3683 b'%s (%s)' % (ctx.hex(), ctx.phasestr())
3683 3684 for ctx in entry[b'divergentnodes']
3684 3685 )
3685 3686 + b' '
3686 3687 )
3687 3688 ui.write(
3688 3689 b'%s: %s%s %s\n'
3689 3690 % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
3690 3691 )
3691 3692
3692 3693
3693 3694 @command(
3694 3695 b'debugwireargs',
3695 3696 [
3696 3697 (b'', b'three', b'', b'three'),
3697 3698 (b'', b'four', b'', b'four'),
3698 3699 (b'', b'five', b'', b'five'),
3699 3700 ]
3700 3701 + cmdutil.remoteopts,
3701 3702 _(b'REPO [OPTIONS]... [ONE [TWO]]'),
3702 3703 norepo=True,
3703 3704 )
3704 3705 def debugwireargs(ui, repopath, *vals, **opts):
3705 3706 opts = pycompat.byteskwargs(opts)
3706 3707 repo = hg.peer(ui, opts, repopath)
3707 3708 for opt in cmdutil.remoteopts:
3708 3709 del opts[opt[1]]
3709 3710 args = {}
3710 3711 for k, v in pycompat.iteritems(opts):
3711 3712 if v:
3712 3713 args[k] = v
3713 3714 args = pycompat.strkwargs(args)
3714 3715 # run twice to check that we don't mess up the stream for the next command
3715 3716 res1 = repo.debugwireargs(*vals, **args)
3716 3717 res2 = repo.debugwireargs(*vals, **args)
3717 3718 ui.write(b"%s\n" % res1)
3718 3719 if res1 != res2:
3719 3720 ui.warn(b"%s\n" % res2)
3720 3721
3721 3722
3722 3723 def _parsewirelangblocks(fh):
3723 3724 activeaction = None
3724 3725 blocklines = []
3725 3726 lastindent = 0
3726 3727
3727 3728 for line in fh:
3728 3729 line = line.rstrip()
3729 3730 if not line:
3730 3731 continue
3731 3732
3732 3733 if line.startswith(b'#'):
3733 3734 continue
3734 3735
3735 3736 if not line.startswith(b' '):
3736 3737 # New block. Flush previous one.
3737 3738 if activeaction:
3738 3739 yield activeaction, blocklines
3739 3740
3740 3741 activeaction = line
3741 3742 blocklines = []
3742 3743 lastindent = 0
3743 3744 continue
3744 3745
3745 3746 # Else we start with an indent.
3746 3747
3747 3748 if not activeaction:
3748 3749 raise error.Abort(_(b'indented line outside of block'))
3749 3750
3750 3751 indent = len(line) - len(line.lstrip())
3751 3752
3752 3753 # If this line is indented more than the last line, concatenate it.
3753 3754 if indent > lastindent and blocklines:
3754 3755 blocklines[-1] += line.lstrip()
3755 3756 else:
3756 3757 blocklines.append(line)
3757 3758 lastindent = indent
3758 3759
3759 3760 # Flush last block.
3760 3761 if activeaction:
3761 3762 yield activeaction, blocklines
3762 3763
3763 3764
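(A small sketch of how the helper above splits a script into ``(action, lines)`` pairs; the sample input is invented.)::

    # hedged example, not part of the original change
    import io

    script = io.BytesIO(
        b'# comments are skipped\n'
        b'command listkeys\n'
        b'    namespace bookmarks\n'
        b'close\n'
    )
    for action, lines in _parsewirelangblocks(script):
        print(action, lines)
    # roughly: (b'command listkeys', [b'    namespace bookmarks'])
    #          (b'close', [])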
3764 3765 @command(
3765 3766 b'debugwireproto',
3766 3767 [
3767 3768 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
3768 3769 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
3769 3770 (
3770 3771 b'',
3771 3772 b'noreadstderr',
3772 3773 False,
3773 3774 _(b'do not read from stderr of the remote'),
3774 3775 ),
3775 3776 (
3776 3777 b'',
3777 3778 b'nologhandshake',
3778 3779 False,
3779 3780 _(b'do not log I/O related to the peer handshake'),
3780 3781 ),
3781 3782 ]
3782 3783 + cmdutil.remoteopts,
3783 3784 _(b'[PATH]'),
3784 3785 optionalrepo=True,
3785 3786 )
3786 3787 def debugwireproto(ui, repo, path=None, **opts):
3787 3788 """send wire protocol commands to a server
3788 3789
3789 3790 This command can be used to issue wire protocol commands to remote
3790 3791 peers and to debug the raw data being exchanged.
3791 3792
3792 3793 ``--localssh`` will start an SSH server against the current repository
3793 3794 and connect to that. By default, the connection will perform a handshake
3794 3795 and establish an appropriate peer instance.
3795 3796
3796 3797 ``--peer`` can be used to bypass the handshake protocol and construct a
3797 3798 peer instance using the specified class type. Valid values are ``raw``,
3798 3799 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
3799 3800 raw data payloads and don't support higher-level command actions.
3800 3801
3801 3802 ``--noreadstderr`` can be used to disable automatic reading from stderr
3802 3803 of the peer (for SSH connections only). Disabling automatic reading of
3803 3804 stderr is useful for making output more deterministic.
3804 3805
3805 3806 Commands are issued via a mini language which is specified via stdin.
3806 3807 The language consists of individual actions to perform. An action is
3807 3808 defined by a block. A block is defined as a line with no leading
3808 3809 space followed by 0 or more lines with leading space. Blocks are
3809 3810 effectively a high-level command with additional metadata.
3810 3811
3811 3812 Lines beginning with ``#`` are ignored.
3812 3813
3813 3814 The following sections denote available actions.
3814 3815
3815 3816 raw
3816 3817 ---
3817 3818
3818 3819 Send raw data to the server.
3819 3820
3820 3821 The block payload contains the raw data to send as one atomic send
3821 3822 operation. The data may not actually be delivered in a single system
3822 3823 call: it depends on the abilities of the transport being used.
3823 3824
3824 3825 Each line in the block is de-indented and concatenated. Then, that
3825 3826 value is evaluated as a Python b'' literal. This allows the use of
3826 3827 backslash escaping, etc.
3827 3828
3828 3829 raw+
3829 3830 ----
3830 3831
3831 3832 Behaves like ``raw`` except flushes output afterwards.
3832 3833
3833 3834 command <X>
3834 3835 -----------
3835 3836
3836 3837 Send a request to run a named command, whose name follows the ``command``
3837 3838 string.
3838 3839
3839 3840 Arguments to the command are defined as lines in this block. The format of
3840 3841 each line is ``<key> <value>``. e.g.::
3841 3842
3842 3843 command listkeys
3843 3844 namespace bookmarks
3844 3845
3845 3846 If the value begins with ``eval:``, it will be interpreted as a Python
3846 3847 literal expression. Otherwise values are interpreted as Python b'' literals.
3847 3848 This allows sending complex types and encoding special byte sequences via
3848 3849 backslash escaping.
3849 3850
3850 3851 The following arguments have special meaning:
3851 3852
3852 3853 ``PUSHFILE``
3853 3854 When defined, the *push* mechanism of the peer will be used instead
3854 3855 of the static request-response mechanism and the content of the
3855 3856 file specified in the value of this argument will be sent as the
3856 3857 command payload.
3857 3858
3858 3859 This can be used to submit a local bundle file to the remote.
3859 3860
3860 3861 batchbegin
3861 3862 ----------
3862 3863
3863 3864 Instruct the peer to begin a batched send.
3864 3865
3865 3866 All ``command`` blocks are queued for execution until the next
3866 3867 ``batchsubmit`` block.
3867 3868
3868 3869 batchsubmit
3869 3870 -----------
3870 3871
3871 3872 Submit previously queued ``command`` blocks as a batch request.
3872 3873
3873 3874 This action MUST be paired with a ``batchbegin`` action.
3874 3875
3875 3876 httprequest <method> <path>
3876 3877 ---------------------------
3877 3878
3878 3879 (HTTP peer only)
3879 3880
3880 3881 Send an HTTP request to the peer.
3881 3882
3882 3883 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
3883 3884
3884 3885 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
3885 3886 headers to add to the request. e.g. ``Accept: foo``.
3886 3887
3887 3888 The following arguments are special:
3888 3889
3889 3890 ``BODYFILE``
3890 3891 The content of the file defined as the value to this argument will be
3891 3892 transferred verbatim as the HTTP request body.
3892 3893
3893 3894 ``frame <type> <flags> <payload>``
3894 3895 Send a unified protocol frame as part of the request body.
3895 3896
3896 3897 All frames will be collected and sent as the body to the HTTP
3897 3898 request.
3898 3899
3899 3900 close
3900 3901 -----
3901 3902
3902 3903 Close the connection to the server.
3903 3904
3904 3905 flush
3905 3906 -----
3906 3907
3907 3908 Flush data written to the server.
3908 3909
3909 3910 readavailable
3910 3911 -------------
3911 3912
3912 3913 Close the write end of the connection and read all available data from
3913 3914 the server.
3914 3915
3915 3916 If the connection to the server encompasses multiple pipes, we poll both
3916 3917 pipes and read available data.
3917 3918
3918 3919 readline
3919 3920 --------
3920 3921
3921 3922 Read a line of output from the server. If there are multiple output
3922 3923 pipes, reads only the main pipe.
3923 3924
3924 3925 ereadline
3925 3926 ---------
3926 3927
3927 3928 Like ``readline``, but read from the stderr pipe, if available.
3928 3929
3929 3930 read <X>
3930 3931 --------
3931 3932
3932 3933 ``read()`` N bytes from the server's main output pipe.
3933 3934
3934 3935 eread <X>
3935 3936 ---------
3936 3937
3937 3938 ``read()`` N bytes from the server's stderr pipe, if available.
3938 3939
3939 3940 Specifying Unified Frame-Based Protocol Frames
3940 3941 ----------------------------------------------
3941 3942
3942 3943 It is possible to emit *Unified Frame-Based Protocol* frames by using
3943 3944 special syntax.
3944 3945
3945 3946 A frame is composed as a type, flags, and payload. These can be parsed
3946 3947 from a string of the form:
3947 3948
3948 3949 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
3949 3950
3950 3951 ``request-id`` and ``stream-id`` are integers defining the request and
3951 3952 stream identifiers.
3952 3953
3953 3954 ``type`` can be an integer value for the frame type or the string name
3954 3955 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
3955 3956 ``command-name``.
3956 3957
3957 3958 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
3958 3959 components. Each component (and there can be just one) can be an integer
3959 3960 or a flag name for stream flags or frame flags, respectively. Values are
3960 3961 resolved to integers and then bitwise OR'd together.
3961 3962
3962 3963 ``payload`` represents the raw frame payload. If it begins with
3963 3964 ``cbor:``, the following string is evaluated as Python code and the
3964 3965 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
3965 3966 as a Python byte string literal.
3966 3967 """
3967 3968 opts = pycompat.byteskwargs(opts)
3968 3969
3969 3970 if opts[b'localssh'] and not repo:
3970 3971 raise error.Abort(_(b'--localssh requires a repository'))
3971 3972
3972 3973 if opts[b'peer'] and opts[b'peer'] not in (
3973 3974 b'raw',
3974 3975 b'http2',
3975 3976 b'ssh1',
3976 3977 b'ssh2',
3977 3978 ):
3978 3979 raise error.Abort(
3979 3980 _(b'invalid value for --peer'),
3980 3981 hint=_(b'valid values are "raw", "http2", "ssh1", and "ssh2"'),
3981 3982 )
3982 3983
3983 3984 if path and opts[b'localssh']:
3984 3985 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
3985 3986
3986 3987 if ui.interactive():
3987 3988 ui.write(_(b'(waiting for commands on stdin)\n'))
3988 3989
3989 3990 blocks = list(_parsewirelangblocks(ui.fin))
3990 3991
3991 3992 proc = None
3992 3993 stdin = None
3993 3994 stdout = None
3994 3995 stderr = None
3995 3996 opener = None
3996 3997
3997 3998 if opts[b'localssh']:
3998 3999 # We start the SSH server in its own process so there is process
3999 4000 # separation. This prevents a whole class of potential bugs around
4000 4001 # shared state from interfering with server operation.
4001 4002 args = procutil.hgcmd() + [
4002 4003 b'-R',
4003 4004 repo.root,
4004 4005 b'debugserve',
4005 4006 b'--sshstdio',
4006 4007 ]
4007 4008 proc = subprocess.Popen(
4008 4009 pycompat.rapply(procutil.tonativestr, args),
4009 4010 stdin=subprocess.PIPE,
4010 4011 stdout=subprocess.PIPE,
4011 4012 stderr=subprocess.PIPE,
4012 4013 bufsize=0,
4013 4014 )
4014 4015
4015 4016 stdin = proc.stdin
4016 4017 stdout = proc.stdout
4017 4018 stderr = proc.stderr
4018 4019
4019 4020 # We turn the pipes into observers so we can log I/O.
4020 4021 if ui.verbose or opts[b'peer'] == b'raw':
4021 4022 stdin = util.makeloggingfileobject(
4022 4023 ui, proc.stdin, b'i', logdata=True
4023 4024 )
4024 4025 stdout = util.makeloggingfileobject(
4025 4026 ui, proc.stdout, b'o', logdata=True
4026 4027 )
4027 4028 stderr = util.makeloggingfileobject(
4028 4029 ui, proc.stderr, b'e', logdata=True
4029 4030 )
4030 4031
4031 4032 # --localssh also implies the peer connection settings.
4032 4033
4033 4034 url = b'ssh://localserver'
4034 4035 autoreadstderr = not opts[b'noreadstderr']
4035 4036
4036 4037 if opts[b'peer'] == b'ssh1':
4037 4038 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4038 4039 peer = sshpeer.sshv1peer(
4039 4040 ui,
4040 4041 url,
4041 4042 proc,
4042 4043 stdin,
4043 4044 stdout,
4044 4045 stderr,
4045 4046 None,
4046 4047 autoreadstderr=autoreadstderr,
4047 4048 )
4048 4049 elif opts[b'peer'] == b'ssh2':
4049 4050 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
4050 4051 peer = sshpeer.sshv2peer(
4051 4052 ui,
4052 4053 url,
4053 4054 proc,
4054 4055 stdin,
4055 4056 stdout,
4056 4057 stderr,
4057 4058 None,
4058 4059 autoreadstderr=autoreadstderr,
4059 4060 )
4060 4061 elif opts[b'peer'] == b'raw':
4061 4062 ui.write(_(b'using raw connection to peer\n'))
4062 4063 peer = None
4063 4064 else:
4064 4065 ui.write(_(b'creating ssh peer from handshake results\n'))
4065 4066 peer = sshpeer.makepeer(
4066 4067 ui,
4067 4068 url,
4068 4069 proc,
4069 4070 stdin,
4070 4071 stdout,
4071 4072 stderr,
4072 4073 autoreadstderr=autoreadstderr,
4073 4074 )
4074 4075
4075 4076 elif path:
4076 4077 # We bypass hg.peer() so we can proxy the sockets.
4077 4078 # TODO consider not doing this because we skip
4078 4079 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4079 4080 u = util.url(path)
4080 4081 if u.scheme != b'http':
4081 4082 raise error.Abort(_(b'only http:// paths are currently supported'))
4082 4083
4083 4084 url, authinfo = u.authinfo()
4084 4085 openerargs = {
4085 4086 'useragent': b'Mercurial debugwireproto',
4086 4087 }
4087 4088
4088 4089 # Turn pipes/sockets into observers so we can log I/O.
4089 4090 if ui.verbose:
4090 4091 openerargs.update(
4091 4092 {
4092 4093 'loggingfh': ui,
4093 4094 'loggingname': b's',
4094 4095 'loggingopts': {'logdata': True, 'logdataapis': False,},
4095 4096 }
4096 4097 )
4097 4098
4098 4099 if ui.debugflag:
4099 4100 openerargs['loggingopts']['logdataapis'] = True
4100 4101
4101 4102 # Don't send default headers when in raw mode. This allows us to
4102 4103 # bypass most of the behavior of our URL handling code so we can
4103 4104 # have near complete control over what's sent on the wire.
4104 4105 if opts[b'peer'] == b'raw':
4105 4106 openerargs['sendaccept'] = False
4106 4107
4107 4108 opener = urlmod.opener(ui, authinfo, **openerargs)
4108 4109
4109 4110 if opts[b'peer'] == b'http2':
4110 4111 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4111 4112 # We go through makepeer() because we need an API descriptor for
4112 4113 # the peer instance to be useful.
4113 4114 with ui.configoverride(
4114 4115 {(b'experimental', b'httppeer.advertise-v2'): True}
4115 4116 ):
4116 4117 if opts[b'nologhandshake']:
4117 4118 ui.pushbuffer()
4118 4119
4119 4120 peer = httppeer.makepeer(ui, path, opener=opener)
4120 4121
4121 4122 if opts[b'nologhandshake']:
4122 4123 ui.popbuffer()
4123 4124
4124 4125 if not isinstance(peer, httppeer.httpv2peer):
4125 4126 raise error.Abort(
4126 4127 _(
4127 4128 b'could not instantiate HTTP peer for '
4128 4129 b'wire protocol version 2'
4129 4130 ),
4130 4131 hint=_(
4131 4132 b'the server may not have the feature '
4132 4133 b'enabled or is not allowing this '
4133 4134 b'client version'
4134 4135 ),
4135 4136 )
4136 4137
4137 4138 elif opts[b'peer'] == b'raw':
4138 4139 ui.write(_(b'using raw connection to peer\n'))
4139 4140 peer = None
4140 4141 elif opts[b'peer']:
4141 4142 raise error.Abort(
4142 4143 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4143 4144 )
4144 4145 else:
4145 4146 peer = httppeer.makepeer(ui, path, opener=opener)
4146 4147
4147 4148 # We /could/ populate stdin/stdout with sock.makefile()...
4148 4149 else:
4149 4150 raise error.Abort(_(b'unsupported connection configuration'))
4150 4151
4151 4152 batchedcommands = None
4152 4153
4153 4154 # Now perform actions based on the parsed wire language instructions.
4154 4155 for action, lines in blocks:
4155 4156 if action in (b'raw', b'raw+'):
4156 4157 if not stdin:
4157 4158 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4158 4159
4159 4160 # Concatenate the data together.
4160 4161 data = b''.join(l.lstrip() for l in lines)
4161 4162 data = stringutil.unescapestr(data)
4162 4163 stdin.write(data)
4163 4164
4164 4165 if action == b'raw+':
4165 4166 stdin.flush()
4166 4167 elif action == b'flush':
4167 4168 if not stdin:
4168 4169 raise error.Abort(_(b'cannot call flush on this peer'))
4169 4170 stdin.flush()
4170 4171 elif action.startswith(b'command'):
4171 4172 if not peer:
4172 4173 raise error.Abort(
4173 4174 _(
4174 4175 b'cannot send commands unless peer instance '
4175 4176 b'is available'
4176 4177 )
4177 4178 )
4178 4179
4179 4180 command = action.split(b' ', 1)[1]
4180 4181
4181 4182 args = {}
4182 4183 for line in lines:
4183 4184 # We need to allow empty values.
4184 4185 fields = line.lstrip().split(b' ', 1)
4185 4186 if len(fields) == 1:
4186 4187 key = fields[0]
4187 4188 value = b''
4188 4189 else:
4189 4190 key, value = fields
4190 4191
4191 4192 if value.startswith(b'eval:'):
4192 4193 value = stringutil.evalpythonliteral(value[5:])
4193 4194 else:
4194 4195 value = stringutil.unescapestr(value)
4195 4196
4196 4197 args[key] = value
4197 4198
4198 4199 if batchedcommands is not None:
4199 4200 batchedcommands.append((command, args))
4200 4201 continue
4201 4202
4202 4203 ui.status(_(b'sending %s command\n') % command)
4203 4204
4204 4205 if b'PUSHFILE' in args:
4205 4206 with open(args[b'PUSHFILE'], 'rb') as fh:
4206 4207 del args[b'PUSHFILE']
4207 4208 res, output = peer._callpush(
4208 4209 command, fh, **pycompat.strkwargs(args)
4209 4210 )
4210 4211 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4211 4212 ui.status(
4212 4213 _(b'remote output: %s\n') % stringutil.escapestr(output)
4213 4214 )
4214 4215 else:
4215 4216 with peer.commandexecutor() as e:
4216 4217 res = e.callcommand(command, args).result()
4217 4218
4218 4219 if isinstance(res, wireprotov2peer.commandresponse):
4219 4220 val = res.objects()
4220 4221 ui.status(
4221 4222 _(b'response: %s\n')
4222 4223 % stringutil.pprint(val, bprefix=True, indent=2)
4223 4224 )
4224 4225 else:
4225 4226 ui.status(
4226 4227 _(b'response: %s\n')
4227 4228 % stringutil.pprint(res, bprefix=True, indent=2)
4228 4229 )
4229 4230
4230 4231 elif action == b'batchbegin':
4231 4232 if batchedcommands is not None:
4232 4233 raise error.Abort(_(b'nested batchbegin not allowed'))
4233 4234
4234 4235 batchedcommands = []
4235 4236 elif action == b'batchsubmit':
4236 4237 # There is a batching API we could go through. But it would be
4237 4238 # difficult to normalize requests into function calls. It is easier
4238 4239 # to bypass this layer and normalize to commands + args.
4239 4240 ui.status(
4240 4241 _(b'sending batch with %d sub-commands\n')
4241 4242 % len(batchedcommands)
4242 4243 )
4243 4244 assert peer is not None
4244 4245 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4245 4246 ui.status(
4246 4247 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4247 4248 )
4248 4249
4249 4250 batchedcommands = None
4250 4251
4251 4252 elif action.startswith(b'httprequest '):
4252 4253 if not opener:
4253 4254 raise error.Abort(
4254 4255 _(b'cannot use httprequest without an HTTP peer')
4255 4256 )
4256 4257
4257 4258 request = action.split(b' ', 2)
4258 4259 if len(request) != 3:
4259 4260 raise error.Abort(
4260 4261 _(
4261 4262 b'invalid httprequest: expected format is '
4262 4263 b'"httprequest <method> <path>'
4263 4264 )
4264 4265 )
4265 4266
4266 4267 method, httppath = request[1:]
4267 4268 headers = {}
4268 4269 body = None
4269 4270 frames = []
4270 4271 for line in lines:
4271 4272 line = line.lstrip()
4272 4273 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4273 4274 if m:
4274 4275 # Headers need to use native strings.
4275 4276 key = pycompat.strurl(m.group(1))
4276 4277 value = pycompat.strurl(m.group(2))
4277 4278 headers[key] = value
4278 4279 continue
4279 4280
4280 4281 if line.startswith(b'BODYFILE '):
4281 4282 with open(line.split(b' ', 1)[1], b'rb') as fh:
4282 4283 body = fh.read()
4283 4284 elif line.startswith(b'frame '):
4284 4285 frame = wireprotoframing.makeframefromhumanstring(
4285 4286 line[len(b'frame ') :]
4286 4287 )
4287 4288
4288 4289 frames.append(frame)
4289 4290 else:
4290 4291 raise error.Abort(
4291 4292 _(b'unknown argument to httprequest: %s') % line
4292 4293 )
4293 4294
4294 4295 url = path + httppath
4295 4296
4296 4297 if frames:
4297 4298 body = b''.join(bytes(f) for f in frames)
4298 4299
4299 4300 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4300 4301
4301 4302 # urllib.Request insists on using has_data() as a proxy for
4302 4303 # determining the request method. Override that to use our
4303 4304 # explicitly requested method.
4304 4305 req.get_method = lambda: pycompat.sysstr(method)
4305 4306
4306 4307 try:
4307 4308 res = opener.open(req)
4308 4309 body = res.read()
4309 4310 except util.urlerr.urlerror as e:
4310 4311 # read() method must be called, but only exists in Python 2
4311 4312 getattr(e, 'read', lambda: None)()
4312 4313 continue
4313 4314
4314 4315 ct = res.headers.get('Content-Type')
4315 4316 if ct == 'application/mercurial-cbor':
4316 4317 ui.write(
4317 4318 _(b'cbor> %s\n')
4318 4319 % stringutil.pprint(
4319 4320 cborutil.decodeall(body), bprefix=True, indent=2
4320 4321 )
4321 4322 )
4322 4323
4323 4324 elif action == b'close':
4324 4325 assert peer is not None
4325 4326 peer.close()
4326 4327 elif action == b'readavailable':
4327 4328 if not stdout or not stderr:
4328 4329 raise error.Abort(
4329 4330 _(b'readavailable not available on this peer')
4330 4331 )
4331 4332
4332 4333 stdin.close()
4333 4334 stdout.read()
4334 4335 stderr.read()
4335 4336
4336 4337 elif action == b'readline':
4337 4338 if not stdout:
4338 4339 raise error.Abort(_(b'readline not available on this peer'))
4339 4340 stdout.readline()
4340 4341 elif action == b'ereadline':
4341 4342 if not stderr:
4342 4343 raise error.Abort(_(b'ereadline not available on this peer'))
4343 4344 stderr.readline()
4344 4345 elif action.startswith(b'read '):
4345 4346 count = int(action.split(b' ', 1)[1])
4346 4347 if not stdout:
4347 4348 raise error.Abort(_(b'read not available on this peer'))
4348 4349 stdout.read(count)
4349 4350 elif action.startswith(b'eread '):
4350 4351 count = int(action.split(b' ', 1)[1])
4351 4352 if not stderr:
4352 4353 raise error.Abort(_(b'eread not available on this peer'))
4353 4354 stderr.read(count)
4354 4355 else:
4355 4356 raise error.Abort(_(b'unknown action: %s') % action)
4356 4357
4357 4358 if batchedcommands is not None:
4358 4359 raise error.Abort(_(b'unclosed "batchbegin" request'))
4359 4360
4360 4361 if peer:
4361 4362 peer.close()
4362 4363
4363 4364 if proc:
4364 4365 proc.kill()
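(As a concrete illustration of the mini language documented above, a script along these lines could be piped to ``hg debugwireproto --localssh``; the commands and namespaces are chosen for illustration only.)::

    # query the server, then batch two listkeys calls
    command heads
    batchbegin
    command listkeys
        namespace bookmarks
    command listkeys
        namespace phases
    batchsubmit
    close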
@@ -1,284 +1,284 b''
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import nullid, nullrev
14 14 from .. import (
15 15 pycompat,
16 16 util,
17 17 )
18 18
19 19 from ..revlogutils import nodemap as nodemaputil
20 20
21 21 stringio = pycompat.bytesio
22 22
23 23
24 24 _pack = struct.pack
25 25 _unpack = struct.unpack
26 26 _compress = zlib.compress
27 27 _decompress = zlib.decompress
28 28
29 29 # Some code below makes tuples directly because it's more convenient. However,
30 30 # code outside this module should always use dirstatetuple.
31 31 def dirstatetuple(*x):
32 32 # x is a tuple
33 33 return x
34 34
35 35
36 36 indexformatng = b">Qiiiiii20s12x"
37 37 indexfirst = struct.calcsize(b'Q')
38 38 sizeint = struct.calcsize(b'i')
39 39 indexsize = struct.calcsize(indexformatng)
40 40
41 41
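(For reference, each ``indexformatng`` record above is 64 bytes: offset/flags in a ``Q``, compressed and uncompressed lengths, base rev, link rev, and both parent revs as six ``i`` fields, a 20-byte node, and 12 padding bytes. A hedged sketch with invented values.)::

    import struct

    assert struct.calcsize(b">Qiiiiii20s12x") == 64
    record = struct.pack(
        b">Qiiiiii20s12x",
        (0 << 16) | 0,  # data offset 0, no flags
        11, 11,         # compressed / uncompressed length
        0, 0,           # base rev, link rev
        -1, -1,         # parent revs (nullrev)
        b"\x01" * 20,   # node id (made up)
    )
    print(struct.unpack(b">Qiiiiii20s12x", record))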
42 42 def gettype(q):
43 43 return int(q & 0xFFFF)
44 44
45 45
46 46 def offset_type(offset, type):
47 47 return int(int(offset) << 16 | type)
48 48
49 49
50 50 class BaseIndexObject(object):
51 51 @property
52 52 def nodemap(self):
53 53 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
54 54 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
55 55 return self._nodemap
56 56
57 57 @util.propertycache
58 58 def _nodemap(self):
59 59 nodemap = nodemaputil.NodeMap({nullid: nullrev})
60 60 for r in range(0, len(self)):
61 61 n = self[r][7]
62 62 nodemap[n] = r
63 63 return nodemap
64 64
65 65 def has_node(self, node):
66 66 """return True if the node exist in the index"""
67 67 return node in self._nodemap
68 68
69 69 def rev(self, node):
70 70 """return a revision for a node
71 71
72 72 If the node is unknown, raise a RevlogError"""
73 73 return self._nodemap[node]
74 74
75 75 def get_rev(self, node):
76 76 """return a revision for a node
77 77
78 78 If the node is unknown, return None"""
79 79 return self._nodemap.get(node)
80 80
81 81 def _stripnodes(self, start):
82 82 if '_nodemap' in vars(self):
83 83 for r in range(start, len(self)):
84 84 n = self[r][7]
85 85 del self._nodemap[n]
86 86
87 87 def clearcaches(self):
88 88 self.__dict__.pop('_nodemap', None)
89 89
90 90 def __len__(self):
91 91 return self._lgt + len(self._extra)
92 92
93 93 def append(self, tup):
94 94 if '_nodemap' in vars(self):
95 95 self._nodemap[tup[7]] = len(self)
96 96 self._extra.append(tup)
97 97
98 98 def _check_index(self, i):
99 99 if not isinstance(i, int):
100 100 raise TypeError(b"expecting int indexes")
101 101 if i < 0 or i >= len(self):
102 102 raise IndexError
103 103
104 104 def __getitem__(self, i):
105 105 if i == -1:
106 106 return (0, 0, 0, -1, -1, -1, -1, nullid)
107 107 self._check_index(i)
108 108 if i >= self._lgt:
109 109 return self._extra[i - self._lgt]
110 110 index = self._calculate_index(i)
111 111 r = struct.unpack(indexformatng, self._data[index : index + indexsize])
112 112 if i == 0:
113 113 e = list(r)
114 114 type = gettype(e[0])
115 115 e[0] = offset_type(0, type)
116 116 return tuple(e)
117 117 return r
118 118
119 119
120 120 class IndexObject(BaseIndexObject):
121 121 def __init__(self, data):
122 122 assert len(data) % indexsize == 0
123 123 self._data = data
124 124 self._lgt = len(data) // indexsize
125 125 self._extra = []
126 126
127 127 def _calculate_index(self, i):
128 128 return i * indexsize
129 129
130 130 def __delitem__(self, i):
131 131 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
132 132 raise ValueError(b"deleting slices only supports a:-1 with step 1")
133 133 i = i.start
134 134 self._check_index(i)
135 135 self._stripnodes(i)
136 136 if i < self._lgt:
137 137 self._data = self._data[: i * indexsize]
138 138 self._lgt = i
139 139 self._extra = []
140 140 else:
141 141 self._extra = self._extra[: i - self._lgt]
142 142
143 143
144 144 class PersistentNodeMapIndexObject(IndexObject):
145 145 """a Debug oriented class to test persistent nodemap
146 146
147 147 We need a simple python object to test API and higher level behavior. See
148 148 the Rust implementation for more serious usage. This should be used only
149 149 through the dedicated `devel.persistent-nodemap` config.
150 150 """
151 151
152 152 def nodemap_data_all(self):
153 153 """Return bytes containing a full serialization of a nodemap
154 154
155 155 The nodemap should be valid for the full set of revisions in the
156 156 index."""
157 157 return nodemaputil.persistent_data(self)
158 158
159 159 def nodemap_data_incremental(self):
160 160 """Return bytes containing a incremental update to persistent nodemap
161 161
162 162 This contains the data for an append-only update of the data provided
163 163 in the last call to `update_nodemap_data`.
164 164 """
165 165 if self._nm_root is None:
166 166 return None
167 167 data = nodemaputil.update_persistent_data(
168 168 self, self._nm_root, self._nm_max_idx, self._nm_rev
169 169 )
170 170 self._nm_root = self._nm_max_idx = self._nm_rev = None
171 171 return data
172 172
173 def update_nodemap_data(self, nm_data):
174 """provide full blokc of persisted binary data for a nodemap
173 def update_nodemap_data(self, docket, nm_data):
174 """provide full block of persisted binary data for a nodemap
175 175
176 176 The data are expected to come from disk. See `nodemap_data_all` for a
177 177 producer of such data."""
178 178 if nm_data is not None:
179 179 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
180 180 if self._nm_root:
181 self._nm_rev = len(self) - 1
181 self._nm_rev = docket.tip_rev
182 182 else:
183 183 self._nm_root = self._nm_max_idx = self._nm_rev = None
184 184
185 185
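(Very roughly, the intended round trip for this debug class looks like the following sketch; ``index`` and ``docket`` are hypothetical objects, and the only assumption made about ``docket`` is the ``tip_rev`` attribute introduced by this change.)::

    data = index.nodemap_data_all()           # full serialization
    # ... data and docket are persisted by the caller ...
    index.update_nodemap_data(docket, data)   # reload; tip comes from docket.tip_rev
    delta = index.nodemap_data_incremental()  # append-only update since that reload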
186 186 class InlinedIndexObject(BaseIndexObject):
187 187 def __init__(self, data, inline=0):
188 188 self._data = data
189 189 self._lgt = self._inline_scan(None)
190 190 self._inline_scan(self._lgt)
191 191 self._extra = []
192 192
193 193 def _inline_scan(self, lgt):
194 194 off = 0
195 195 if lgt is not None:
196 196 self._offsets = [0] * lgt
197 197 count = 0
198 198 while off <= len(self._data) - indexsize:
199 199 (s,) = struct.unpack(
200 200 b'>i', self._data[off + indexfirst : off + sizeint + indexfirst]
201 201 )
202 202 if lgt is not None:
203 203 self._offsets[count] = off
204 204 count += 1
205 205 off += indexsize + s
206 206 if off != len(self._data):
207 207 raise ValueError(b"corrupted data")
208 208 return count
209 209
210 210 def __delitem__(self, i):
211 211 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
212 212 raise ValueError(b"deleting slices only supports a:-1 with step 1")
213 213 i = i.start
214 214 self._check_index(i)
215 215 self._stripnodes(i)
216 216 if i < self._lgt:
217 217 self._offsets = self._offsets[:i]
218 218 self._lgt = i
219 219 self._extra = []
220 220 else:
221 221 self._extra = self._extra[: i - self._lgt]
222 222
223 223 def _calculate_index(self, i):
224 224 return self._offsets[i]
225 225
226 226
227 227 def parse_index2(data, inline):
228 228 if not inline:
229 229 return IndexObject(data), None
230 230 return InlinedIndexObject(data, inline), (0, data)
231 231
232 232
233 233 def parse_index_devel_nodemap(data, inline):
234 234 """like parse_index2, but alway return a PersistentNodeMapIndexObject
235 235 """
236 236 return PersistentNodeMapIndexObject(data), None
237 237
238 238
239 239 def parse_dirstate(dmap, copymap, st):
240 240 parents = [st[:20], st[20:40]]
241 241 # dereference fields so they will be local in loop
242 242 format = b">cllll"
243 243 e_size = struct.calcsize(format)
244 244 pos1 = 40
245 245 l = len(st)
246 246
247 247 # the inner loop
248 248 while pos1 < l:
249 249 pos2 = pos1 + e_size
250 250 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
251 251 pos1 = pos2 + e[4]
252 252 f = st[pos2:pos1]
253 253 if b'\0' in f:
254 254 f, c = f.split(b'\0')
255 255 copymap[f] = c
256 256 dmap[f] = e[:4]
257 257 return parents
258 258
259 259
260 260 def pack_dirstate(dmap, copymap, pl, now):
261 261 now = int(now)
262 262 cs = stringio()
263 263 write = cs.write
264 264 write(b"".join(pl))
265 265 for f, e in pycompat.iteritems(dmap):
266 266 if e[0] == b'n' and e[3] == now:
267 267 # The file was last modified "simultaneously" with the current
268 268 # write to dirstate (i.e. within the same second for file-
269 269 # systems with a granularity of 1 sec). This commonly happens
270 270 # for at least a couple of files on 'update'.
271 271 # The user could change the file without changing its size
272 272 # within the same second. Invalidate the file's mtime in
273 273 # dirstate, forcing future 'status' calls to compare the
274 274 # contents of the file if the size is the same. This prevents
275 275 # mistakenly treating such files as clean.
276 276 e = dirstatetuple(e[0], e[1], e[2], -1)
277 277 dmap[f] = e
278 278
279 279 if f in copymap:
280 280 f = b"%s\0%s" % (f, copymap[f])
281 281 e = _pack(b">cllll", e[0], e[1], e[2], e[3], len(f))
282 282 write(e)
283 283 write(f)
284 284 return cs.getvalue()
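(For reference, each dirstate entry handled above is a ``>cllll`` header holding state, mode, size, mtime, and the filename length, followed by the filename itself; a hedged sketch with invented values.)::

    import struct

    fname = b"dir/file.txt"
    header = struct.pack(b">cllll", b"n", 0o644, 12, 0, len(fname))
    entry = header + fname
    hsize = struct.calcsize(b">cllll")  # 17 bytes
    assert struct.unpack(b">cllll", entry[:hsize])[4] == len(fname)
    assert entry[hsize:] == fname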
@@ -1,3033 +1,3033 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import contextlib
18 18 import errno
19 19 import io
20 20 import os
21 21 import struct
22 22 import zlib
23 23
24 24 # import stuff from node for others to import from revlog
25 25 from .node import (
26 26 bin,
27 27 hex,
28 28 nullhex,
29 29 nullid,
30 30 nullrev,
31 31 short,
32 32 wdirfilenodeids,
33 33 wdirhex,
34 34 wdirid,
35 35 wdirrev,
36 36 )
37 37 from .i18n import _
38 38 from .pycompat import getattr
39 39 from .revlogutils.constants import (
40 40 FLAG_GENERALDELTA,
41 41 FLAG_INLINE_DATA,
42 42 REVLOGV0,
43 43 REVLOGV1,
44 44 REVLOGV1_FLAGS,
45 45 REVLOGV2,
46 46 REVLOGV2_FLAGS,
47 47 REVLOG_DEFAULT_FLAGS,
48 48 REVLOG_DEFAULT_FORMAT,
49 49 REVLOG_DEFAULT_VERSION,
50 50 )
51 51 from .revlogutils.flagutil import (
52 52 REVIDX_DEFAULT_FLAGS,
53 53 REVIDX_ELLIPSIS,
54 54 REVIDX_EXTSTORED,
55 55 REVIDX_FLAGS_ORDER,
56 56 REVIDX_ISCENSORED,
57 57 REVIDX_RAWTEXT_CHANGING_FLAGS,
58 58 REVIDX_SIDEDATA,
59 59 )
60 60 from .thirdparty import attr
61 61 from . import (
62 62 ancestor,
63 63 dagop,
64 64 error,
65 65 mdiff,
66 66 policy,
67 67 pycompat,
68 68 templatefilters,
69 69 util,
70 70 )
71 71 from .interfaces import (
72 72 repository,
73 73 util as interfaceutil,
74 74 )
75 75 from .revlogutils import (
76 76 deltas as deltautil,
77 77 flagutil,
78 78 nodemap as nodemaputil,
79 79 sidedata as sidedatautil,
80 80 )
81 81 from .utils import (
82 82 storageutil,
83 83 stringutil,
84 84 )
85 85
86 86 # blanked usage of all the names to prevent pyflakes constraints
87 87 # We need these names available in the module for extensions.
88 88 REVLOGV0
89 89 REVLOGV1
90 90 REVLOGV2
91 91 FLAG_INLINE_DATA
92 92 FLAG_GENERALDELTA
93 93 REVLOG_DEFAULT_FLAGS
94 94 REVLOG_DEFAULT_FORMAT
95 95 REVLOG_DEFAULT_VERSION
96 96 REVLOGV1_FLAGS
97 97 REVLOGV2_FLAGS
98 98 REVIDX_ISCENSORED
99 99 REVIDX_ELLIPSIS
100 100 REVIDX_SIDEDATA
101 101 REVIDX_EXTSTORED
102 102 REVIDX_DEFAULT_FLAGS
103 103 REVIDX_FLAGS_ORDER
104 104 REVIDX_RAWTEXT_CHANGING_FLAGS
105 105
106 106 parsers = policy.importmod('parsers')
107 107 rustancestor = policy.importrust('ancestor')
108 108 rustdagop = policy.importrust('dagop')
109 109 rustrevlog = policy.importrust('revlog')
110 110
111 111 # Aliased for performance.
112 112 _zlibdecompress = zlib.decompress
113 113
114 114 # max size of revlog with inline data
115 115 _maxinline = 131072
116 116 _chunksize = 1048576
117 117
118 118 # Flag processors for REVIDX_ELLIPSIS.
119 119 def ellipsisreadprocessor(rl, text):
120 120 return text, False, {}
121 121
122 122
123 123 def ellipsiswriteprocessor(rl, text, sidedata):
124 124 return text, False
125 125
126 126
127 127 def ellipsisrawprocessor(rl, text):
128 128 return False
129 129
130 130
131 131 ellipsisprocessor = (
132 132 ellipsisreadprocessor,
133 133 ellipsiswriteprocessor,
134 134 ellipsisrawprocessor,
135 135 )
136 136
137 137
138 138 def getoffset(q):
139 139 return int(q >> 16)
140 140
141 141
142 142 def gettype(q):
143 143 return int(q & 0xFFFF)
144 144
145 145
146 146 def offset_type(offset, type):
147 147 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
148 148 raise ValueError(b'unknown revlog index flags')
149 149 return int(int(offset) << 16 | type)
150 150
151 151
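(A tiny worked example of the offset/flags packing defined above; the values are invented.)::

    packed = offset_type(4096, 0)      # data offset 4096, no flags set
    assert packed == 4096 << 16
    assert getoffset(packed) == 4096   # high bits
    assert gettype(packed) == 0        # low 16 bits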
152 152 def _verify_revision(rl, skipflags, state, node):
153 153 """Verify the integrity of the given revlog ``node`` while providing a hook
154 154 point for extensions to influence the operation."""
155 155 if skipflags:
156 156 state[b'skipread'].add(node)
157 157 else:
158 158 # Side-effect: read content and verify hash.
159 159 rl.revision(node)
160 160
161 161
162 162 @attr.s(slots=True, frozen=True)
163 163 class _revisioninfo(object):
164 164 """Information about a revision that allows building its fulltext
165 165 node: expected hash of the revision
166 166 p1, p2: parent revs of the revision
167 167 btext: built text cache consisting of a one-element list
168 168 cachedelta: (baserev, uncompressed_delta) or None
169 169 flags: flags associated to the revision storage
170 170
171 171 One of btext[0] or cachedelta must be set.
172 172 """
173 173
174 174 node = attr.ib()
175 175 p1 = attr.ib()
176 176 p2 = attr.ib()
177 177 btext = attr.ib()
178 178 textlen = attr.ib()
179 179 cachedelta = attr.ib()
180 180 flags = attr.ib()
181 181
182 182
183 183 @interfaceutil.implementer(repository.irevisiondelta)
184 184 @attr.s(slots=True)
185 185 class revlogrevisiondelta(object):
186 186 node = attr.ib()
187 187 p1node = attr.ib()
188 188 p2node = attr.ib()
189 189 basenode = attr.ib()
190 190 flags = attr.ib()
191 191 baserevisionsize = attr.ib()
192 192 revision = attr.ib()
193 193 delta = attr.ib()
194 194 linknode = attr.ib(default=None)
195 195
196 196
197 197 @interfaceutil.implementer(repository.iverifyproblem)
198 198 @attr.s(frozen=True)
199 199 class revlogproblem(object):
200 200 warning = attr.ib(default=None)
201 201 error = attr.ib(default=None)
202 202 node = attr.ib(default=None)
203 203
204 204
205 205 # index v0:
206 206 # 4 bytes: offset
207 207 # 4 bytes: compressed length
208 208 # 4 bytes: base rev
209 209 # 4 bytes: link rev
210 210 # 20 bytes: parent 1 nodeid
211 211 # 20 bytes: parent 2 nodeid
212 212 # 20 bytes: nodeid
213 213 indexformatv0 = struct.Struct(b">4l20s20s20s")
214 214 indexformatv0_pack = indexformatv0.pack
215 215 indexformatv0_unpack = indexformatv0.unpack
216 216
217 217
218 218 class revlogoldindex(list):
219 219 @property
220 220 def nodemap(self):
221 221 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
222 222 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
223 223 return self._nodemap
224 224
225 225 @util.propertycache
226 226 def _nodemap(self):
227 227 nodemap = nodemaputil.NodeMap({nullid: nullrev})
228 228 for r in range(0, len(self)):
229 229 n = self[r][7]
230 230 nodemap[n] = r
231 231 return nodemap
232 232
233 233 def has_node(self, node):
234 234 """return True if the node exist in the index"""
235 235 return node in self._nodemap
236 236
237 237 def rev(self, node):
238 238 """return a revision for a node
239 239
240 240 If the node is unknown, raise a RevlogError"""
241 241 return self._nodemap[node]
242 242
243 243 def get_rev(self, node):
244 244 """return a revision for a node
245 245
246 246 If the node is unknown, return None"""
247 247 return self._nodemap.get(node)
248 248
249 249 def append(self, tup):
250 250 self._nodemap[tup[7]] = len(self)
251 251 super(revlogoldindex, self).append(tup)
252 252
253 253 def __delitem__(self, i):
254 254 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
255 255 raise ValueError(b"deleting slices only supports a:-1 with step 1")
256 256 for r in pycompat.xrange(i.start, len(self)):
257 257 del self._nodemap[self[r][7]]
258 258 super(revlogoldindex, self).__delitem__(i)
259 259
260 260 def clearcaches(self):
261 261 self.__dict__.pop('_nodemap', None)
262 262
263 263 def __getitem__(self, i):
264 264 if i == -1:
265 265 return (0, 0, 0, -1, -1, -1, -1, nullid)
266 266 return list.__getitem__(self, i)
267 267
268 268
269 269 class revlogoldio(object):
270 270 def __init__(self):
271 271 self.size = indexformatv0.size
272 272
273 273 def parseindex(self, data, inline):
274 274 s = self.size
275 275 index = []
276 276 nodemap = nodemaputil.NodeMap({nullid: nullrev})
277 277 n = off = 0
278 278 l = len(data)
279 279 while off + s <= l:
280 280 cur = data[off : off + s]
281 281 off += s
282 282 e = indexformatv0_unpack(cur)
283 283 # transform to revlogv1 format
284 284 e2 = (
285 285 offset_type(e[0], 0),
286 286 e[1],
287 287 -1,
288 288 e[2],
289 289 e[3],
290 290 nodemap.get(e[4], nullrev),
291 291 nodemap.get(e[5], nullrev),
292 292 e[6],
293 293 )
294 294 index.append(e2)
295 295 nodemap[e[6]] = n
296 296 n += 1
297 297
298 298 index = revlogoldindex(index)
299 299 return index, None
300 300
301 301 def packentry(self, entry, node, version, rev):
302 302 if gettype(entry[0]):
303 303 raise error.RevlogError(
304 304 _(b'index entry flags need revlog version 1')
305 305 )
306 306 e2 = (
307 307 getoffset(entry[0]),
308 308 entry[1],
309 309 entry[3],
310 310 entry[4],
311 311 node(entry[5]),
312 312 node(entry[6]),
313 313 entry[7],
314 314 )
315 315 return indexformatv0_pack(*e2)
316 316
317 317
318 318 # index ng:
319 319 # 6 bytes: offset
320 320 # 2 bytes: flags
321 321 # 4 bytes: compressed length
322 322 # 4 bytes: uncompressed length
323 323 # 4 bytes: base rev
324 324 # 4 bytes: link rev
325 325 # 4 bytes: parent 1 rev
326 326 # 4 bytes: parent 2 rev
327 327 # 32 bytes: nodeid
328 328 indexformatng = struct.Struct(b">Qiiiiii20s12x")
329 329 indexformatng_pack = indexformatng.pack
330 330 versionformat = struct.Struct(b">I")
331 331 versionformat_pack = versionformat.pack
332 332 versionformat_unpack = versionformat.unpack
333 333
334 334 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
335 335 # signed integer)
336 336 _maxentrysize = 0x7FFFFFFF
337 337
338 338
339 339 class revlogio(object):
340 340 def __init__(self):
341 341 self.size = indexformatng.size
342 342
343 343 def parseindex(self, data, inline):
344 344 # call the C implementation to parse the index data
345 345 index, cache = parsers.parse_index2(data, inline)
346 346 return index, cache
347 347
348 348 def packentry(self, entry, node, version, rev):
349 349 p = indexformatng_pack(*entry)
350 350 if rev == 0:
351 351 p = versionformat_pack(version) + p[4:]
352 352 return p
353 353
354 354
355 355 NodemapRevlogIO = None
356 356
357 357 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
358 358
359 359 class NodemapRevlogIO(revlogio):
360 360 """A debug oriented IO class that return a PersistentNodeMapIndexObject
361 361
362 362 The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature.
363 363 """
364 364
365 365 def parseindex(self, data, inline):
366 366 index, cache = parsers.parse_index_devel_nodemap(data, inline)
367 367 return index, cache
368 368
369 369
370 370 class rustrevlogio(revlogio):
371 371 def parseindex(self, data, inline):
372 372 index, cache = super(rustrevlogio, self).parseindex(data, inline)
373 373 return rustrevlog.MixedIndex(index), cache
374 374
375 375
376 376 class revlog(object):
377 377 """
378 378 the underlying revision storage object
379 379
380 380 A revlog consists of two parts, an index and the revision data.
381 381
382 382 The index is a file with a fixed record size containing
383 383 information on each revision, including its nodeid (hash), the
384 384 nodeids of its parents, the position and offset of its data within
385 385 the data file, and the revision it's based on. Finally, each entry
386 386 contains a linkrev entry that can serve as a pointer to external
387 387 data.
388 388
389 389 The revision data itself is a linear collection of data chunks.
390 390 Each chunk represents a revision and is usually represented as a
391 391 delta against the previous chunk. To bound lookup time, runs of
392 392 deltas are limited to about 2 times the length of the original
393 393 version data. This makes retrieval of a version proportional to
394 394 its size, or O(1) relative to the number of revisions.
395 395
396 396 Both pieces of the revlog are written to in an append-only
397 397 fashion, which means we never need to rewrite a file to insert or
398 398 remove data, and can use some simple techniques to avoid the need
399 399 for locking while reading.
400 400
401 401 If checkambig, indexfile is opened with checkambig=True at
402 402 writing, to avoid file stat ambiguity.
403 403
404 404 If mmaplargeindex is True, and an mmapindexthreshold is set, the
405 405 index will be mmapped rather than read if it is larger than the
406 406 configured threshold.
407 407
408 408 If censorable is True, the revlog can have censored revisions.
409 409
410 410 If `upperboundcomp` is not None, this is the expected maximal gain from
411 411 compression for the data content.
412 412 """
413 413
414 414 _flagserrorclass = error.RevlogError
415 415
416 416 def __init__(
417 417 self,
418 418 opener,
419 419 indexfile,
420 420 datafile=None,
421 421 checkambig=False,
422 422 mmaplargeindex=False,
423 423 censorable=False,
424 424 upperboundcomp=None,
425 425 persistentnodemap=False,
426 426 ):
427 427 """
428 428 create a revlog object
429 429
430 430 opener is a function that abstracts the file opening operation
431 431 and can be used to implement COW semantics or the like.
432 432
433 433 """
434 434 self.upperboundcomp = upperboundcomp
435 435 self.indexfile = indexfile
436 436 self.datafile = datafile or (indexfile[:-2] + b".d")
437 437 self.nodemap_file = None
438 438 if persistentnodemap:
439 439 self.nodemap_file = indexfile[:-2] + b".n"
440 440
441 441 self.opener = opener
442 442 # When True, indexfile is opened with checkambig=True at writing, to
443 443 # avoid file stat ambiguity.
444 444 self._checkambig = checkambig
445 445 self._mmaplargeindex = mmaplargeindex
446 446 self._censorable = censorable
447 447 # 3-tuple of (node, rev, text) for a raw revision.
448 448 self._revisioncache = None
449 449 # Maps rev to chain base rev.
450 450 self._chainbasecache = util.lrucachedict(100)
451 451 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
452 452 self._chunkcache = (0, b'')
453 453 # How much data to read and cache into the raw revlog data cache.
454 454 self._chunkcachesize = 65536
455 455 self._maxchainlen = None
456 456 self._deltabothparents = True
457 457 self.index = None
458 458 self._nodemap_docket = None
459 459 # Mapping of partial identifiers to full nodes.
460 460 self._pcache = {}
462 462 self._compengine = b'zlib'
463 463 self._compengineopts = {}
464 464 self._maxdeltachainspan = -1
465 465 self._withsparseread = False
466 466 self._sparserevlog = False
467 467 self._srdensitythreshold = 0.50
468 468 self._srmingapsize = 262144
469 469
470 470 # Make copy of flag processors so each revlog instance can support
471 471 # custom flags.
472 472 self._flagprocessors = dict(flagutil.flagprocessors)
473 473
474 474 # 2-tuple of file handles being used for active writing.
475 475 self._writinghandles = None
476 476
477 477 self._loadindex()
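A hedged construction sketch for the `opener` and `indexfile` arguments documented in the docstring above. The store path and filenames are illustrative only, and a real repository's store vfs also carries the `options` dict that _loadindex() reads:

    from mercurial import vfs as vfsmod

    opener = vfsmod.vfs(b'/repo/.hg/store')   # any vfs-style opener works
    cl = revlog(opener, b'00changelog.i', persistentnodemap=True)
    # datafile defaults to b'00changelog.d'; when the persistent nodemap is
    # enabled, its docket lives next to the index as b'00changelog.n'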
478 478
479 479 def _loadindex(self):
480 480 mmapindexthreshold = None
481 481 opts = self.opener.options
482 482
483 483 if b'revlogv2' in opts:
484 484 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
485 485 elif b'revlogv1' in opts:
486 486 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
487 487 if b'generaldelta' in opts:
488 488 newversionflags |= FLAG_GENERALDELTA
489 489 elif b'revlogv0' in self.opener.options:
490 490 newversionflags = REVLOGV0
491 491 else:
492 492 newversionflags = REVLOG_DEFAULT_VERSION
493 493
494 494 if b'chunkcachesize' in opts:
495 495 self._chunkcachesize = opts[b'chunkcachesize']
496 496 if b'maxchainlen' in opts:
497 497 self._maxchainlen = opts[b'maxchainlen']
498 498 if b'deltabothparents' in opts:
499 499 self._deltabothparents = opts[b'deltabothparents']
500 500 self._lazydelta = bool(opts.get(b'lazydelta', True))
501 501 self._lazydeltabase = False
502 502 if self._lazydelta:
503 503 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
504 504 if b'compengine' in opts:
505 505 self._compengine = opts[b'compengine']
506 506 if b'zlib.level' in opts:
507 507 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
508 508 if b'zstd.level' in opts:
509 509 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
510 510 if b'maxdeltachainspan' in opts:
511 511 self._maxdeltachainspan = opts[b'maxdeltachainspan']
512 512 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
513 513 mmapindexthreshold = opts[b'mmapindexthreshold']
514 514 self.hassidedata = bool(opts.get(b'side-data', False))
515 515 if self.hassidedata:
516 516 self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
517 517 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
518 518 withsparseread = bool(opts.get(b'with-sparse-read', False))
519 519 # sparse-revlog forces sparse-read
520 520 self._withsparseread = self._sparserevlog or withsparseread
521 521 if b'sparse-read-density-threshold' in opts:
522 522 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
523 523 if b'sparse-read-min-gap-size' in opts:
524 524 self._srmingapsize = opts[b'sparse-read-min-gap-size']
525 525 if opts.get(b'enableellipsis'):
526 526 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
527 527
528 528 # revlog v0 doesn't have flag processors
529 529 for flag, processor in pycompat.iteritems(
530 530 opts.get(b'flagprocessors', {})
531 531 ):
532 532 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
533 533
534 534 if self._chunkcachesize <= 0:
535 535 raise error.RevlogError(
536 536 _(b'revlog chunk cache size %r is not greater than 0')
537 537 % self._chunkcachesize
538 538 )
539 539 elif self._chunkcachesize & (self._chunkcachesize - 1):
540 540 raise error.RevlogError(
541 541 _(b'revlog chunk cache size %r is not a power of 2')
542 542 % self._chunkcachesize
543 543 )
544 544
545 545 indexdata = b''
546 546 self._initempty = True
547 547 try:
548 548 nodemap_data = nodemaputil.persisted_data(self)
549 549 if nodemap_data is not None:
550 550 self._nodemap_docket = nodemap_data[0]
551 551 with self._indexfp() as f:
552 552 if (
553 553 mmapindexthreshold is not None
554 554 and self.opener.fstat(f).st_size >= mmapindexthreshold
555 555 ):
556 556 # TODO: should .close() to release resources without
557 557 # relying on Python GC
558 558 indexdata = util.buffer(util.mmapread(f))
559 559 else:
560 560 indexdata = f.read()
561 561 if len(indexdata) > 0:
562 562 versionflags = versionformat_unpack(indexdata[:4])[0]
563 563 self._initempty = False
564 564 else:
565 565 versionflags = newversionflags
566 566 except IOError as inst:
567 567 if inst.errno != errno.ENOENT:
568 568 raise
569 569
570 570 versionflags = newversionflags
571 571
572 572 self.version = versionflags
573 573
574 574 flags = versionflags & ~0xFFFF
575 575 fmt = versionflags & 0xFFFF
576 576
577 577 if fmt == REVLOGV0:
578 578 if flags:
579 579 raise error.RevlogError(
580 580 _(b'unknown flags (%#04x) in version %d revlog %s')
581 581 % (flags >> 16, fmt, self.indexfile)
582 582 )
583 583
584 584 self._inline = False
585 585 self._generaldelta = False
586 586
587 587 elif fmt == REVLOGV1:
588 588 if flags & ~REVLOGV1_FLAGS:
589 589 raise error.RevlogError(
590 590 _(b'unknown flags (%#04x) in version %d revlog %s')
591 591 % (flags >> 16, fmt, self.indexfile)
592 592 )
593 593
594 594 self._inline = versionflags & FLAG_INLINE_DATA
595 595 self._generaldelta = versionflags & FLAG_GENERALDELTA
596 596
597 597 elif fmt == REVLOGV2:
598 598 if flags & ~REVLOGV2_FLAGS:
599 599 raise error.RevlogError(
600 600 _(b'unknown flags (%#04x) in version %d revlog %s')
601 601 % (flags >> 16, fmt, self.indexfile)
602 602 )
603 603
604 604 self._inline = versionflags & FLAG_INLINE_DATA
605 605 # generaldelta implied by version 2 revlogs.
606 606 self._generaldelta = True
607 607
608 608 else:
609 609 raise error.RevlogError(
610 610 _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
611 611 )
612 612 # sparse-revlog can't be on without general-delta (issue6056)
613 613 if not self._generaldelta:
614 614 self._sparserevlog = False
615 615
616 616 self._storedeltachains = True
617 617
618 618 devel_nodemap = (
619 619 self.nodemap_file
620 620 and opts.get(b'devel-force-nodemap', False)
621 621 and NodemapRevlogIO is not None
622 622 )
623 623
624 624 self._io = revlogio()
625 625 if self.version == REVLOGV0:
626 626 self._io = revlogoldio()
627 627 elif devel_nodemap:
628 628 self._io = NodemapRevlogIO()
629 629 elif rustrevlog is not None and self.opener.options.get(b'rust.index'):
630 630 self._io = rustrevlogio()
631 631 try:
632 632 d = self._io.parseindex(indexdata, self._inline)
633 633 index, _chunkcache = d
634 634 use_nodemap = (
635 635 not self._inline
636 636 and self.nodemap_file is not None
637 637 and util.safehasattr(index, 'update_nodemap_data')
638 638 )
639 639 if use_nodemap:
640 640 nodemap_data = nodemaputil.persisted_data(self)
641 641 if nodemap_data is not None:
642 index.update_nodemap_data(nodemap_data[1])
642 index.update_nodemap_data(*nodemap_data)
643 643 except (ValueError, IndexError):
644 644 raise error.RevlogError(
645 645 _(b"index %s is corrupted") % self.indexfile
646 646 )
647 647 self.index, self._chunkcache = d
648 648 if not self._chunkcache:
649 649 self._chunkclear()
650 650 # revnum -> (chain-length, sum-delta-length)
651 651 self._chaininfocache = {}
652 652 # revlog header -> revlog compressor
653 653 self._decompressors = {}
654 654
655 655 @util.propertycache
656 656 def _compressor(self):
657 657 engine = util.compengines[self._compengine]
658 658 return engine.revlogcompressor(self._compengineopts)
659 659
660 660 def _indexfp(self, mode=b'r'):
661 661 """file object for the revlog's index file"""
662 662 args = {'mode': mode}
663 663 if mode != b'r':
664 664 args['checkambig'] = self._checkambig
665 665 if mode == b'w':
666 666 args['atomictemp'] = True
667 667 return self.opener(self.indexfile, **args)
668 668
669 669 def _datafp(self, mode=b'r'):
670 670 """file object for the revlog's data file"""
671 671 return self.opener(self.datafile, mode=mode)
672 672
673 673 @contextlib.contextmanager
674 674 def _datareadfp(self, existingfp=None):
675 675 """file object suitable to read data"""
676 676 # Use explicit file handle, if given.
677 677 if existingfp is not None:
678 678 yield existingfp
679 679
680 680 # Use a file handle being actively used for writes, if available.
681 681 # There is some danger to doing this because reads will seek the
682 682 # file. However, _writeentry() performs a SEEK_END before all writes,
683 683 # so we should be safe.
684 684 elif self._writinghandles:
685 685 if self._inline:
686 686 yield self._writinghandles[0]
687 687 else:
688 688 yield self._writinghandles[1]
689 689
690 690 # Otherwise open a new file handle.
691 691 else:
692 692 if self._inline:
693 693 func = self._indexfp
694 694 else:
695 695 func = self._datafp
696 696 with func() as fp:
697 697 yield fp
698 698
699 699 def tiprev(self):
700 700 return len(self.index) - 1
701 701
702 702 def tip(self):
703 703 return self.node(self.tiprev())
704 704
705 705 def __contains__(self, rev):
706 706 return 0 <= rev < len(self)
707 707
708 708 def __len__(self):
709 709 return len(self.index)
710 710
711 711 def __iter__(self):
712 712 return iter(pycompat.xrange(len(self)))
713 713
714 714 def revs(self, start=0, stop=None):
715 715 """iterate over all rev in this revlog (from start to stop)"""
716 716 return storageutil.iterrevs(len(self), start=start, stop=stop)
717 717
718 718 @property
719 719 def nodemap(self):
720 720 msg = (
721 721 b"revlog.nodemap is deprecated, "
722 722 b"use revlog.index.[has_node|rev|get_rev]"
723 723 )
724 724 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
725 725 return self.index.nodemap
726 726
727 727 @property
728 728 def _nodecache(self):
729 729 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
730 730 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
731 731 return self.index.nodemap
732 732
733 733 def hasnode(self, node):
734 734 try:
735 735 self.rev(node)
736 736 return True
737 737 except KeyError:
738 738 return False
739 739
740 740 def candelta(self, baserev, rev):
741 741 """whether two revisions (baserev, rev) can be delta-ed or not"""
742 742 # Disable delta if either rev requires a content-changing flag
743 743 # processor (ex. LFS). This is because such a flag processor can alter
744 744 # the rawtext content that the delta will be based on, and two clients
745 745 # could have the same revlog node with different flags (i.e. different
746 746 # rawtext contents) and the delta could be incompatible.
747 747 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
748 748 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
749 749 ):
750 750 return False
751 751 return True
752 752
753 753 def clearcaches(self):
754 754 self._revisioncache = None
755 755 self._chainbasecache.clear()
756 756 self._chunkcache = (0, b'')
757 757 self._pcache = {}
758 758 self.index.clearcaches()
759 759
760 760 def rev(self, node):
761 761 try:
762 762 return self.index.rev(node)
763 763 except TypeError:
764 764 raise
765 765 except error.RevlogError:
766 766 # parsers.c radix tree lookup failed
767 767 if node == wdirid or node in wdirfilenodeids:
768 768 raise error.WdirUnsupported
769 769 raise error.LookupError(node, self.indexfile, _(b'no node'))
770 770
771 771 # Accessors for index entries.
772 772
773 773 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
774 774 # are flags.
775 775 def start(self, rev):
776 776 return int(self.index[rev][0] >> 16)
777 777
778 778 def flags(self, rev):
779 779 return self.index[rev][0] & 0xFFFF
780 780
781 781 def length(self, rev):
782 782 return self.index[rev][1]
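To make the bit layout noted in the comment above concrete, this is the unpacking that start() and flags() perform on a raw index tuple (a sketch; `rl` is an assumed existing revlog instance):

    e = rl.index[rev]
    offset = e[0] >> 16      # upper 48 bits: position of the chunk in the data file
    flags = e[0] & 0xFFFF    # lower 16 bits: REVIDX_* flags
    assert offset == rl.start(rev) and flags == rl.flags(rev)
    assert e[1] == rl.length(rev)    # compressed chunk length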
783 783
784 784 def rawsize(self, rev):
785 785 """return the length of the uncompressed text for a given revision"""
786 786 l = self.index[rev][2]
787 787 if l >= 0:
788 788 return l
789 789
790 790 t = self.rawdata(rev)
791 791 return len(t)
792 792
793 793 def size(self, rev):
794 794 """length of non-raw text (processed by a "read" flag processor)"""
795 795 # fast path: if no "read" flag processor could change the content,
796 796 # size is rawsize. note: ELLIPSIS is known to not change the content.
797 797 flags = self.flags(rev)
798 798 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
799 799 return self.rawsize(rev)
800 800
801 801 return len(self.revision(rev, raw=False))
802 802
803 803 def chainbase(self, rev):
804 804 base = self._chainbasecache.get(rev)
805 805 if base is not None:
806 806 return base
807 807
808 808 index = self.index
809 809 iterrev = rev
810 810 base = index[iterrev][3]
811 811 while base != iterrev:
812 812 iterrev = base
813 813 base = index[iterrev][3]
814 814
815 815 self._chainbasecache[rev] = base
816 816 return base
817 817
818 818 def linkrev(self, rev):
819 819 return self.index[rev][4]
820 820
821 821 def parentrevs(self, rev):
822 822 try:
823 823 entry = self.index[rev]
824 824 except IndexError:
825 825 if rev == wdirrev:
826 826 raise error.WdirUnsupported
827 827 raise
828 828
829 829 return entry[5], entry[6]
830 830
831 831 # fast parentrevs(rev) where rev isn't filtered
832 832 _uncheckedparentrevs = parentrevs
833 833
834 834 def node(self, rev):
835 835 try:
836 836 return self.index[rev][7]
837 837 except IndexError:
838 838 if rev == wdirrev:
839 839 raise error.WdirUnsupported
840 840 raise
841 841
842 842 # Derived from index values.
843 843
844 844 def end(self, rev):
845 845 return self.start(rev) + self.length(rev)
846 846
847 847 def parents(self, node):
848 848 i = self.index
849 849 d = i[self.rev(node)]
850 850 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
851 851
852 852 def chainlen(self, rev):
853 853 return self._chaininfo(rev)[0]
854 854
855 855 def _chaininfo(self, rev):
856 856 chaininfocache = self._chaininfocache
857 857 if rev in chaininfocache:
858 858 return chaininfocache[rev]
859 859 index = self.index
860 860 generaldelta = self._generaldelta
861 861 iterrev = rev
862 862 e = index[iterrev]
863 863 clen = 0
864 864 compresseddeltalen = 0
865 865 while iterrev != e[3]:
866 866 clen += 1
867 867 compresseddeltalen += e[1]
868 868 if generaldelta:
869 869 iterrev = e[3]
870 870 else:
871 871 iterrev -= 1
872 872 if iterrev in chaininfocache:
873 873 t = chaininfocache[iterrev]
874 874 clen += t[0]
875 875 compresseddeltalen += t[1]
876 876 break
877 877 e = index[iterrev]
878 878 else:
879 879 # Add text length of base since decompressing that also takes
880 880 # work. For cache hits the length is already included.
881 881 compresseddeltalen += e[1]
882 882 r = (clen, compresseddeltalen)
883 883 chaininfocache[rev] = r
884 884 return r
885 885
886 886 def _deltachain(self, rev, stoprev=None):
887 887 """Obtain the delta chain for a revision.
888 888
889 889 ``stoprev`` specifies a revision to stop at. If not specified, we
890 890 stop at the base of the chain.
891 891
892 892 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
893 893 revs in ascending order and ``stopped`` is a bool indicating whether
894 894 ``stoprev`` was hit.
895 895 """
896 896 # Try C implementation.
897 897 try:
898 898 return self.index.deltachain(rev, stoprev, self._generaldelta)
899 899 except AttributeError:
900 900 pass
901 901
902 902 chain = []
903 903
904 904 # Alias to prevent attribute lookup in tight loop.
905 905 index = self.index
906 906 generaldelta = self._generaldelta
907 907
908 908 iterrev = rev
909 909 e = index[iterrev]
910 910 while iterrev != e[3] and iterrev != stoprev:
911 911 chain.append(iterrev)
912 912 if generaldelta:
913 913 iterrev = e[3]
914 914 else:
915 915 iterrev -= 1
916 916 e = index[iterrev]
917 917
918 918 if iterrev == stoprev:
919 919 stopped = True
920 920 else:
921 921 chain.append(iterrev)
922 922 stopped = False
923 923
924 924 chain.reverse()
925 925 return chain, stopped
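For illustration, a caller rebuilds a revision from such a chain roughly like this (compare _rawtext() further down; a sketch that assumes ``stopped`` is False so the chain starts at a full snapshot, with ``mdiff`` being the module this file already uses):

    chain, stopped = rl._deltachain(rev)
    base = bytes(rl._chunk(chain[0]))           # full text at the chain base
    deltas = [rl._chunk(r) for r in chain[1:]]
    text = mdiff.patches(base, deltas)          # apply the deltas in order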
926 926
927 927 def ancestors(self, revs, stoprev=0, inclusive=False):
928 928 """Generate the ancestors of 'revs' in reverse revision order.
929 929 Does not generate revs lower than stoprev.
930 930
931 931 See the documentation for ancestor.lazyancestors for more details."""
932 932
933 933 # first, make sure start revisions aren't filtered
934 934 revs = list(revs)
935 935 checkrev = self.node
936 936 for r in revs:
937 937 checkrev(r)
938 938 # and we're sure ancestors aren't filtered as well
939 939
940 940 if rustancestor is not None:
941 941 lazyancestors = rustancestor.LazyAncestors
942 942 arg = self.index
943 943 elif util.safehasattr(parsers, 'rustlazyancestors'):
944 944 lazyancestors = ancestor.rustlazyancestors
945 945 arg = self.index
946 946 else:
947 947 lazyancestors = ancestor.lazyancestors
948 948 arg = self._uncheckedparentrevs
949 949 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
950 950
951 951 def descendants(self, revs):
952 952 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
953 953
954 954 def findcommonmissing(self, common=None, heads=None):
955 955 """Return a tuple of the ancestors of common and the ancestors of heads
956 956 that are not ancestors of common. In revset terminology, we return the
957 957 tuple:
958 958
959 959 ::common, (::heads) - (::common)
960 960
961 961 The list is sorted by revision number, meaning it is
962 962 topologically sorted.
963 963
964 964 'heads' and 'common' are both lists of node IDs. If heads is
965 965 not supplied, uses all of the revlog's heads. If common is not
966 966 supplied, uses nullid."""
967 967 if common is None:
968 968 common = [nullid]
969 969 if heads is None:
970 970 heads = self.heads()
971 971
972 972 common = [self.rev(n) for n in common]
973 973 heads = [self.rev(n) for n in heads]
974 974
975 975 # we want the ancestors, but inclusive
976 976 class lazyset(object):
977 977 def __init__(self, lazyvalues):
978 978 self.addedvalues = set()
979 979 self.lazyvalues = lazyvalues
980 980
981 981 def __contains__(self, value):
982 982 return value in self.addedvalues or value in self.lazyvalues
983 983
984 984 def __iter__(self):
985 985 added = self.addedvalues
986 986 for r in added:
987 987 yield r
988 988 for r in self.lazyvalues:
989 989 if r not in added:
990 990 yield r
991 991
992 992 def add(self, value):
993 993 self.addedvalues.add(value)
994 994
995 995 def update(self, values):
996 996 self.addedvalues.update(values)
997 997
998 998 has = lazyset(self.ancestors(common))
999 999 has.add(nullrev)
1000 1000 has.update(common)
1001 1001
1002 1002 # take all ancestors from heads that aren't in has
1003 1003 missing = set()
1004 1004 visit = collections.deque(r for r in heads if r not in has)
1005 1005 while visit:
1006 1006 r = visit.popleft()
1007 1007 if r in missing:
1008 1008 continue
1009 1009 else:
1010 1010 missing.add(r)
1011 1011 for p in self.parentrevs(r):
1012 1012 if p not in has:
1013 1013 visit.append(p)
1014 1014 missing = list(missing)
1015 1015 missing.sort()
1016 1016 return has, [self.node(miss) for miss in missing]
1017 1017
1018 1018 def incrementalmissingrevs(self, common=None):
1019 1019 """Return an object that can be used to incrementally compute the
1020 1020 revision numbers of the ancestors of arbitrary sets that are not
1021 1021 ancestors of common. This is an ancestor.incrementalmissingancestors
1022 1022 object.
1023 1023
1024 1024 'common' is a list of revision numbers. If common is not supplied, uses
1025 1025 nullrev.
1026 1026 """
1027 1027 if common is None:
1028 1028 common = [nullrev]
1029 1029
1030 1030 if rustancestor is not None:
1031 1031 return rustancestor.MissingAncestors(self.index, common)
1032 1032 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1033 1033
1034 1034 def findmissingrevs(self, common=None, heads=None):
1035 1035 """Return the revision numbers of the ancestors of heads that
1036 1036 are not ancestors of common.
1037 1037
1038 1038 More specifically, return a list of revision numbers corresponding to
1039 1039 nodes N such that every N satisfies the following constraints:
1040 1040
1041 1041 1. N is an ancestor of some node in 'heads'
1042 1042 2. N is not an ancestor of any node in 'common'
1043 1043
1044 1044 The list is sorted by revision number, meaning it is
1045 1045 topologically sorted.
1046 1046
1047 1047 'heads' and 'common' are both lists of revision numbers. If heads is
1048 1048 not supplied, uses all of the revlog's heads. If common is not
1049 1049 supplied, uses nullid."""
1050 1050 if common is None:
1051 1051 common = [nullrev]
1052 1052 if heads is None:
1053 1053 heads = self.headrevs()
1054 1054
1055 1055 inc = self.incrementalmissingrevs(common=common)
1056 1056 return inc.missingancestors(heads)
1057 1057
1058 1058 def findmissing(self, common=None, heads=None):
1059 1059 """Return the ancestors of heads that are not ancestors of common.
1060 1060
1061 1061 More specifically, return a list of nodes N such that every N
1062 1062 satisfies the following constraints:
1063 1063
1064 1064 1. N is an ancestor of some node in 'heads'
1065 1065 2. N is not an ancestor of any node in 'common'
1066 1066
1067 1067 The list is sorted by revision number, meaning it is
1068 1068 topologically sorted.
1069 1069
1070 1070 'heads' and 'common' are both lists of node IDs. If heads is
1071 1071 not supplied, uses all of the revlog's heads. If common is not
1072 1072 supplied, uses nullid."""
1073 1073 if common is None:
1074 1074 common = [nullid]
1075 1075 if heads is None:
1076 1076 heads = self.heads()
1077 1077
1078 1078 common = [self.rev(n) for n in common]
1079 1079 heads = [self.rev(n) for n in heads]
1080 1080
1081 1081 inc = self.incrementalmissingrevs(common=common)
1082 1082 return [self.node(r) for r in inc.missingancestors(heads)]
1083 1083
1084 1084 def nodesbetween(self, roots=None, heads=None):
1085 1085 """Return a topological path from 'roots' to 'heads'.
1086 1086
1087 1087 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1088 1088 topologically sorted list of all nodes N that satisfy both of
1089 1089 these constraints:
1090 1090
1091 1091 1. N is a descendant of some node in 'roots'
1092 1092 2. N is an ancestor of some node in 'heads'
1093 1093
1094 1094 Every node is considered to be both a descendant and an ancestor
1095 1095 of itself, so every reachable node in 'roots' and 'heads' will be
1096 1096 included in 'nodes'.
1097 1097
1098 1098 'outroots' is the list of reachable nodes in 'roots', i.e., the
1099 1099 subset of 'roots' that is returned in 'nodes'. Likewise,
1100 1100 'outheads' is the subset of 'heads' that is also in 'nodes'.
1101 1101
1102 1102 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1103 1103 unspecified, uses nullid as the only root. If 'heads' is
1104 1104 unspecified, uses list of all of the revlog's heads."""
1105 1105 nonodes = ([], [], [])
1106 1106 if roots is not None:
1107 1107 roots = list(roots)
1108 1108 if not roots:
1109 1109 return nonodes
1110 1110 lowestrev = min([self.rev(n) for n in roots])
1111 1111 else:
1112 1112 roots = [nullid] # Everybody's a descendant of nullid
1113 1113 lowestrev = nullrev
1114 1114 if (lowestrev == nullrev) and (heads is None):
1115 1115 # We want _all_ the nodes!
1116 1116 return ([self.node(r) for r in self], [nullid], list(self.heads()))
1117 1117 if heads is None:
1118 1118 # All nodes are ancestors, so the latest ancestor is the last
1119 1119 # node.
1120 1120 highestrev = len(self) - 1
1121 1121 # Set ancestors to None to signal that every node is an ancestor.
1122 1122 ancestors = None
1123 1123 # Set heads to an empty dictionary for later discovery of heads
1124 1124 heads = {}
1125 1125 else:
1126 1126 heads = list(heads)
1127 1127 if not heads:
1128 1128 return nonodes
1129 1129 ancestors = set()
1130 1130 # Turn heads into a dictionary so we can remove 'fake' heads.
1131 1131 # Also, later we will be using it to filter out the heads we can't
1132 1132 # find from roots.
1133 1133 heads = dict.fromkeys(heads, False)
1134 1134 # Start at the top and keep marking parents until we're done.
1135 1135 nodestotag = set(heads)
1136 1136 # Remember where the top was so we can use it as a limit later.
1137 1137 highestrev = max([self.rev(n) for n in nodestotag])
1138 1138 while nodestotag:
1139 1139 # grab a node to tag
1140 1140 n = nodestotag.pop()
1141 1141 # Never tag nullid
1142 1142 if n == nullid:
1143 1143 continue
1144 1144 # A node's revision number represents its place in a
1145 1145 # topologically sorted list of nodes.
1146 1146 r = self.rev(n)
1147 1147 if r >= lowestrev:
1148 1148 if n not in ancestors:
1149 1149 # If we are possibly a descendant of one of the roots
1150 1150 # and we haven't already been marked as an ancestor
1151 1151 ancestors.add(n) # Mark as ancestor
1152 1152 # Add non-nullid parents to list of nodes to tag.
1153 1153 nodestotag.update(
1154 1154 [p for p in self.parents(n) if p != nullid]
1155 1155 )
1156 1156 elif n in heads: # We've seen it before, is it a fake head?
1157 1157 # So it is, real heads should not be the ancestors of
1158 1158 # any other heads.
1159 1159 heads.pop(n)
1160 1160 if not ancestors:
1161 1161 return nonodes
1162 1162 # Now that we have our set of ancestors, we want to remove any
1163 1163 # roots that are not ancestors.
1164 1164
1165 1165 # If one of the roots was nullid, everything is included anyway.
1166 1166 if lowestrev > nullrev:
1167 1167 # But, since we weren't, let's recompute the lowest rev to not
1168 1168 # include roots that aren't ancestors.
1169 1169
1170 1170 # Filter out roots that aren't ancestors of heads
1171 1171 roots = [root for root in roots if root in ancestors]
1172 1172 # Recompute the lowest revision
1173 1173 if roots:
1174 1174 lowestrev = min([self.rev(root) for root in roots])
1175 1175 else:
1176 1176 # No more roots? Return empty list
1177 1177 return nonodes
1178 1178 else:
1179 1179 # We are descending from nullid, and don't need to care about
1180 1180 # any other roots.
1181 1181 lowestrev = nullrev
1182 1182 roots = [nullid]
1183 1183 # Transform our roots list into a set.
1184 1184 descendants = set(roots)
1185 1185 # Also, keep the original roots so we can filter out roots that aren't
1186 1186 # 'real' roots (i.e. are descended from other roots).
1187 1187 roots = descendants.copy()
1188 1188 # Our topologically sorted list of output nodes.
1189 1189 orderedout = []
1190 1190 # Don't start at nullid since we don't want nullid in our output list,
1191 1191 # and if nullid shows up in descendants, empty parents will look like
1192 1192 # they're descendants.
1193 1193 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1194 1194 n = self.node(r)
1195 1195 isdescendant = False
1196 1196 if lowestrev == nullrev: # Everybody is a descendant of nullid
1197 1197 isdescendant = True
1198 1198 elif n in descendants:
1199 1199 # n is already a descendant
1200 1200 isdescendant = True
1201 1201 # This check only needs to be done here because all the roots
1202 1202 # will start being marked as descendants before the loop.
1203 1203 if n in roots:
1204 1204 # If n was a root, check if it's a 'real' root.
1205 1205 p = tuple(self.parents(n))
1206 1206 # If any of its parents are descendants, it's not a root.
1207 1207 if (p[0] in descendants) or (p[1] in descendants):
1208 1208 roots.remove(n)
1209 1209 else:
1210 1210 p = tuple(self.parents(n))
1211 1211 # A node is a descendant if either of its parents are
1212 1212 # descendants. (We seeded the descendants set with the roots
1213 1213 # up there, remember?)
1214 1214 if (p[0] in descendants) or (p[1] in descendants):
1215 1215 descendants.add(n)
1216 1216 isdescendant = True
1217 1217 if isdescendant and ((ancestors is None) or (n in ancestors)):
1218 1218 # Only include nodes that are both descendants and ancestors.
1219 1219 orderedout.append(n)
1220 1220 if (ancestors is not None) and (n in heads):
1221 1221 # We're trying to figure out which heads are reachable
1222 1222 # from roots.
1223 1223 # Mark this head as having been reached
1224 1224 heads[n] = True
1225 1225 elif ancestors is None:
1226 1226 # Otherwise, we're trying to discover the heads.
1227 1227 # Assume this is a head because if it isn't, the next step
1228 1228 # will eventually remove it.
1229 1229 heads[n] = True
1230 1230 # But, obviously its parents aren't.
1231 1231 for p in self.parents(n):
1232 1232 heads.pop(p, None)
1233 1233 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1234 1234 roots = list(roots)
1235 1235 assert orderedout
1236 1236 assert roots
1237 1237 assert heads
1238 1238 return (orderedout, roots, heads)
1239 1239
1240 1240 def headrevs(self, revs=None):
1241 1241 if revs is None:
1242 1242 try:
1243 1243 return self.index.headrevs()
1244 1244 except AttributeError:
1245 1245 return self._headrevs()
1246 1246 if rustdagop is not None:
1247 1247 return rustdagop.headrevs(self.index, revs)
1248 1248 return dagop.headrevs(revs, self._uncheckedparentrevs)
1249 1249
1250 1250 def computephases(self, roots):
1251 1251 return self.index.computephasesmapsets(roots)
1252 1252
1253 1253 def _headrevs(self):
1254 1254 count = len(self)
1255 1255 if not count:
1256 1256 return [nullrev]
1257 1257 # we won't iter over filtered rev so nobody is a head at start
1258 1258 ishead = [0] * (count + 1)
1259 1259 index = self.index
1260 1260 for r in self:
1261 1261 ishead[r] = 1 # I may be a head
1262 1262 e = index[r]
1263 1263 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1264 1264 return [r for r, val in enumerate(ishead) if val]
1265 1265
1266 1266 def heads(self, start=None, stop=None):
1267 1267 """return the list of all nodes that have no children
1268 1268
1269 1269 if start is specified, only heads that are descendants of
1270 1270 start will be returned
1271 1271 if stop is specified, it will consider all the revs from stop
1272 1272 as if they had no children
1273 1273 """
1274 1274 if start is None and stop is None:
1275 1275 if not len(self):
1276 1276 return [nullid]
1277 1277 return [self.node(r) for r in self.headrevs()]
1278 1278
1279 1279 if start is None:
1280 1280 start = nullrev
1281 1281 else:
1282 1282 start = self.rev(start)
1283 1283
1284 1284 stoprevs = set(self.rev(n) for n in stop or [])
1285 1285
1286 1286 revs = dagop.headrevssubset(
1287 1287 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1288 1288 )
1289 1289
1290 1290 return [self.node(rev) for rev in revs]
1291 1291
1292 1292 def children(self, node):
1293 1293 """find the children of a given node"""
1294 1294 c = []
1295 1295 p = self.rev(node)
1296 1296 for r in self.revs(start=p + 1):
1297 1297 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1298 1298 if prevs:
1299 1299 for pr in prevs:
1300 1300 if pr == p:
1301 1301 c.append(self.node(r))
1302 1302 elif p == nullrev:
1303 1303 c.append(self.node(r))
1304 1304 return c
1305 1305
1306 1306 def commonancestorsheads(self, a, b):
1307 1307 """calculate all the heads of the common ancestors of nodes a and b"""
1308 1308 a, b = self.rev(a), self.rev(b)
1309 1309 ancs = self._commonancestorsheads(a, b)
1310 1310 return pycompat.maplist(self.node, ancs)
1311 1311
1312 1312 def _commonancestorsheads(self, *revs):
1313 1313 """calculate all the heads of the common ancestors of revs"""
1314 1314 try:
1315 1315 ancs = self.index.commonancestorsheads(*revs)
1316 1316 except (AttributeError, OverflowError): # C implementation failed
1317 1317 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1318 1318 return ancs
1319 1319
1320 1320 def isancestor(self, a, b):
1321 1321 """return True if node a is an ancestor of node b
1322 1322
1323 1323 A revision is considered an ancestor of itself."""
1324 1324 a, b = self.rev(a), self.rev(b)
1325 1325 return self.isancestorrev(a, b)
1326 1326
1327 1327 def isancestorrev(self, a, b):
1328 1328 """return True if revision a is an ancestor of revision b
1329 1329
1330 1330 A revision is considered an ancestor of itself.
1331 1331
1332 1332 The implementation of this is trivial but the use of
1333 1333 reachableroots is not."""
1334 1334 if a == nullrev:
1335 1335 return True
1336 1336 elif a == b:
1337 1337 return True
1338 1338 elif a > b:
1339 1339 return False
1340 1340 return bool(self.reachableroots(a, [b], [a], includepath=False))
1341 1341
1342 1342 def reachableroots(self, minroot, heads, roots, includepath=False):
1343 1343 """return (heads(::(<roots> and <roots>::<heads>)))
1344 1344
1345 1345 If includepath is True, return (<roots>::<heads>)."""
1346 1346 try:
1347 1347 return self.index.reachableroots2(
1348 1348 minroot, heads, roots, includepath
1349 1349 )
1350 1350 except AttributeError:
1351 1351 return dagop._reachablerootspure(
1352 1352 self.parentrevs, minroot, roots, heads, includepath
1353 1353 )
1354 1354
1355 1355 def ancestor(self, a, b):
1356 1356 """calculate the "best" common ancestor of nodes a and b"""
1357 1357
1358 1358 a, b = self.rev(a), self.rev(b)
1359 1359 try:
1360 1360 ancs = self.index.ancestors(a, b)
1361 1361 except (AttributeError, OverflowError):
1362 1362 ancs = ancestor.ancestors(self.parentrevs, a, b)
1363 1363 if ancs:
1364 1364 # choose a consistent winner when there's a tie
1365 1365 return min(map(self.node, ancs))
1366 1366 return nullid
1367 1367
1368 1368 def _match(self, id):
1369 1369 if isinstance(id, int):
1370 1370 # rev
1371 1371 return self.node(id)
1372 1372 if len(id) == 20:
1373 1373 # possibly a binary node
1374 1374 # odds of a binary node being all hex in ASCII are 1 in 10**25
1375 1375 try:
1376 1376 node = id
1377 1377 self.rev(node) # quick search the index
1378 1378 return node
1379 1379 except error.LookupError:
1380 1380 pass # may be partial hex id
1381 1381 try:
1382 1382 # str(rev)
1383 1383 rev = int(id)
1384 1384 if b"%d" % rev != id:
1385 1385 raise ValueError
1386 1386 if rev < 0:
1387 1387 rev = len(self) + rev
1388 1388 if rev < 0 or rev >= len(self):
1389 1389 raise ValueError
1390 1390 return self.node(rev)
1391 1391 except (ValueError, OverflowError):
1392 1392 pass
1393 1393 if len(id) == 40:
1394 1394 try:
1395 1395 # a full hex nodeid?
1396 1396 node = bin(id)
1397 1397 self.rev(node)
1398 1398 return node
1399 1399 except (TypeError, error.LookupError):
1400 1400 pass
1401 1401
1402 1402 def _partialmatch(self, id):
1403 1403 # we don't care about wdirfilenodeids as they should always be full hashes
1404 1404 maybewdir = wdirhex.startswith(id)
1405 1405 try:
1406 1406 partial = self.index.partialmatch(id)
1407 1407 if partial and self.hasnode(partial):
1408 1408 if maybewdir:
1409 1409 # single 'ff...' match in radix tree, ambiguous with wdir
1410 1410 raise error.RevlogError
1411 1411 return partial
1412 1412 if maybewdir:
1413 1413 # no 'ff...' match in radix tree, wdir identified
1414 1414 raise error.WdirUnsupported
1415 1415 return None
1416 1416 except error.RevlogError:
1417 1417 # parsers.c radix tree lookup gave multiple matches
1418 1418 # fast path: for unfiltered changelog, radix tree is accurate
1419 1419 if not getattr(self, 'filteredrevs', None):
1420 1420 raise error.AmbiguousPrefixLookupError(
1421 1421 id, self.indexfile, _(b'ambiguous identifier')
1422 1422 )
1423 1423 # fall through to slow path that filters hidden revisions
1424 1424 except (AttributeError, ValueError):
1425 1425 # we are pure python, or key was too short to search radix tree
1426 1426 pass
1427 1427
1428 1428 if id in self._pcache:
1429 1429 return self._pcache[id]
1430 1430
1431 1431 if len(id) <= 40:
1432 1432 try:
1433 1433 # hex(node)[:...]
1434 1434 l = len(id) // 2 # grab an even number of digits
1435 1435 prefix = bin(id[: l * 2])
1436 1436 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1437 1437 nl = [
1438 1438 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1439 1439 ]
1440 1440 if nullhex.startswith(id):
1441 1441 nl.append(nullid)
1442 1442 if len(nl) > 0:
1443 1443 if len(nl) == 1 and not maybewdir:
1444 1444 self._pcache[id] = nl[0]
1445 1445 return nl[0]
1446 1446 raise error.AmbiguousPrefixLookupError(
1447 1447 id, self.indexfile, _(b'ambiguous identifier')
1448 1448 )
1449 1449 if maybewdir:
1450 1450 raise error.WdirUnsupported
1451 1451 return None
1452 1452 except TypeError:
1453 1453 pass
1454 1454
1455 1455 def lookup(self, id):
1456 1456 """locate a node based on:
1457 1457 - revision number or str(revision number)
1458 1458 - nodeid or subset of hex nodeid
1459 1459 """
1460 1460 n = self._match(id)
1461 1461 if n is not None:
1462 1462 return n
1463 1463 n = self._partialmatch(id)
1464 1464 if n:
1465 1465 return n
1466 1466
1467 1467 raise error.LookupError(id, self.indexfile, _(b'no match found'))
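For example, each of the following forms resolves to the same binary node when the input is unambiguous (a sketch; `rl` and `node` are assumed):

    rl.lookup(b'0')               # revision number, as bytes
    rl.lookup(node)               # 20-byte binary nodeid
    rl.lookup(hex(node)[:8])      # unique hex prefix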
1468 1468
1469 1469 def shortest(self, node, minlength=1):
1470 1470 """Find the shortest unambiguous prefix that matches node."""
1471 1471
1472 1472 def isvalid(prefix):
1473 1473 try:
1474 1474 matchednode = self._partialmatch(prefix)
1475 1475 except error.AmbiguousPrefixLookupError:
1476 1476 return False
1477 1477 except error.WdirUnsupported:
1478 1478 # single 'ff...' match
1479 1479 return True
1480 1480 if matchednode is None:
1481 1481 raise error.LookupError(node, self.indexfile, _(b'no node'))
1482 1482 return True
1483 1483
1484 1484 def maybewdir(prefix):
1485 1485 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1486 1486
1487 1487 hexnode = hex(node)
1488 1488
1489 1489 def disambiguate(hexnode, minlength):
1490 1490 """Disambiguate against wdirid."""
1491 1491 for length in range(minlength, 41):
1492 1492 prefix = hexnode[:length]
1493 1493 if not maybewdir(prefix):
1494 1494 return prefix
1495 1495
1496 1496 if not getattr(self, 'filteredrevs', None):
1497 1497 try:
1498 1498 length = max(self.index.shortest(node), minlength)
1499 1499 return disambiguate(hexnode, length)
1500 1500 except error.RevlogError:
1501 1501 if node != wdirid:
1502 1502 raise error.LookupError(node, self.indexfile, _(b'no node'))
1503 1503 except AttributeError:
1504 1504 # Fall through to pure code
1505 1505 pass
1506 1506
1507 1507 if node == wdirid:
1508 1508 for length in range(minlength, 41):
1509 1509 prefix = hexnode[:length]
1510 1510 if isvalid(prefix):
1511 1511 return prefix
1512 1512
1513 1513 for length in range(minlength, 41):
1514 1514 prefix = hexnode[:length]
1515 1515 if isvalid(prefix):
1516 1516 return disambiguate(hexnode, length)
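A small usage sketch of the contract above:

    prefix = rl.shortest(node, minlength=4)
    assert len(prefix) >= 4
    assert rl._partialmatch(prefix) == node    # the prefix is unambiguous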
1517 1517
1518 1518 def cmp(self, node, text):
1519 1519 """compare text with a given file revision
1520 1520
1521 1521 returns True if text is different than what is stored.
1522 1522 """
1523 1523 p1, p2 = self.parents(node)
1524 1524 return storageutil.hashrevisionsha1(text, p1, p2) != node
1525 1525
1526 1526 def _cachesegment(self, offset, data):
1527 1527 """Add a segment to the revlog cache.
1528 1528
1529 1529 Accepts an absolute offset and the data that is at that location.
1530 1530 """
1531 1531 o, d = self._chunkcache
1532 1532 # try to add to existing cache
1533 1533 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1534 1534 self._chunkcache = o, d + data
1535 1535 else:
1536 1536 self._chunkcache = offset, data
1537 1537
1538 1538 def _readsegment(self, offset, length, df=None):
1539 1539 """Load a segment of raw data from the revlog.
1540 1540
1541 1541 Accepts an absolute offset, length to read, and an optional existing
1542 1542 file handle to read from.
1543 1543
1544 1544 If an existing file handle is passed, it will be seeked and the
1545 1545 original seek position will NOT be restored.
1546 1546
1547 1547 Returns a str or buffer of raw byte data.
1548 1548
1549 1549 Raises if the requested number of bytes could not be read.
1550 1550 """
1551 1551 # Cache data both forward and backward around the requested
1552 1552 # data, in a fixed size window. This helps speed up operations
1553 1553 # involving reading the revlog backwards.
1554 1554 cachesize = self._chunkcachesize
1555 1555 realoffset = offset & ~(cachesize - 1)
1556 1556 reallength = (
1557 1557 (offset + length + cachesize) & ~(cachesize - 1)
1558 1558 ) - realoffset
1559 1559 with self._datareadfp(df) as df:
1560 1560 df.seek(realoffset)
1561 1561 d = df.read(reallength)
1562 1562
1563 1563 self._cachesegment(realoffset, d)
1564 1564 if offset != realoffset or reallength != length:
1565 1565 startoffset = offset - realoffset
1566 1566 if len(d) - startoffset < length:
1567 1567 raise error.RevlogError(
1568 1568 _(
1569 1569 b'partial read of revlog %s; expected %d bytes from '
1570 1570 b'offset %d, got %d'
1571 1571 )
1572 1572 % (
1573 1573 self.indexfile if self._inline else self.datafile,
1574 1574 length,
1575 1575 realoffset,
1576 1576 len(d) - startoffset,
1577 1577 )
1578 1578 )
1579 1579
1580 1580 return util.buffer(d, startoffset, length)
1581 1581
1582 1582 if len(d) < length:
1583 1583 raise error.RevlogError(
1584 1584 _(
1585 1585 b'partial read of revlog %s; expected %d bytes from offset '
1586 1586 b'%d, got %d'
1587 1587 )
1588 1588 % (
1589 1589 self.indexfile if self._inline else self.datafile,
1590 1590 length,
1591 1591 offset,
1592 1592 len(d),
1593 1593 )
1594 1594 )
1595 1595
1596 1596 return d
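A worked example of the window alignment above, using the default 64KiB chunk cache size: a 100-byte read at offset 70000 is widened to one aligned cache window.

    cachesize = 65536
    offset, length = 70000, 100
    realoffset = offset & ~(cachesize - 1)                                        # 65536
    reallength = ((offset + length + cachesize) & ~(cachesize - 1)) - realoffset  # 65536
    # the cached window [65536, 131072) fully covers the requested [70000, 70100)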
1597 1597
1598 1598 def _getsegment(self, offset, length, df=None):
1599 1599 """Obtain a segment of raw data from the revlog.
1600 1600
1601 1601 Accepts an absolute offset, length of bytes to obtain, and an
1602 1602 optional file handle to the already-opened revlog. If the file
1603 1603 handle is used, its original seek position will not be preserved.
1604 1604
1605 1605 Requests for data may be returned from a cache.
1606 1606
1607 1607 Returns a str or a buffer instance of raw byte data.
1608 1608 """
1609 1609 o, d = self._chunkcache
1610 1610 l = len(d)
1611 1611
1612 1612 # is it in the cache?
1613 1613 cachestart = offset - o
1614 1614 cacheend = cachestart + length
1615 1615 if cachestart >= 0 and cacheend <= l:
1616 1616 if cachestart == 0 and cacheend == l:
1617 1617 return d # avoid a copy
1618 1618 return util.buffer(d, cachestart, cacheend - cachestart)
1619 1619
1620 1620 return self._readsegment(offset, length, df=df)
1621 1621
1622 1622 def _getsegmentforrevs(self, startrev, endrev, df=None):
1623 1623 """Obtain a segment of raw data corresponding to a range of revisions.
1624 1624
1625 1625 Accepts the start and end revisions and an optional already-open
1626 1626 file handle to be used for reading. If the file handle is read, its
1627 1627 seek position will not be preserved.
1628 1628
1629 1629 Requests for data may be satisfied by a cache.
1630 1630
1631 1631 Returns a 2-tuple of (offset, data) for the requested range of
1632 1632 revisions. Offset is the integer offset from the beginning of the
1633 1633 revlog and data is a str or buffer of the raw byte data.
1634 1634
1635 1635 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1636 1636 to determine where each revision's data begins and ends.
1637 1637 """
1638 1638 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1639 1639 # (functions are expensive).
1640 1640 index = self.index
1641 1641 istart = index[startrev]
1642 1642 start = int(istart[0] >> 16)
1643 1643 if startrev == endrev:
1644 1644 end = start + istart[1]
1645 1645 else:
1646 1646 iend = index[endrev]
1647 1647 end = int(iend[0] >> 16) + iend[1]
1648 1648
1649 1649 if self._inline:
1650 1650 start += (startrev + 1) * self._io.size
1651 1651 end += (endrev + 1) * self._io.size
1652 1652 length = end - start
1653 1653
1654 1654 return start, self._getsegment(start, length, df=df)
1655 1655
1656 1656 def _chunk(self, rev, df=None):
1657 1657 """Obtain a single decompressed chunk for a revision.
1658 1658
1659 1659 Accepts an integer revision and an optional already-open file handle
1660 1660 to be used for reading. If used, the seek position of the file will not
1661 1661 be preserved.
1662 1662
1663 1663 Returns a str holding uncompressed data for the requested revision.
1664 1664 """
1665 1665 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1666 1666
1667 1667 def _chunks(self, revs, df=None, targetsize=None):
1668 1668 """Obtain decompressed chunks for the specified revisions.
1669 1669
1670 1670 Accepts an iterable of numeric revisions that are assumed to be in
1671 1671 ascending order. Also accepts an optional already-open file handle
1672 1672 to be used for reading. If used, the seek position of the file will
1673 1673 not be preserved.
1674 1674
1675 1675 This function is similar to calling ``self._chunk()`` multiple times,
1676 1676 but is faster.
1677 1677
1678 1678 Returns a list with decompressed data for each requested revision.
1679 1679 """
1680 1680 if not revs:
1681 1681 return []
1682 1682 start = self.start
1683 1683 length = self.length
1684 1684 inline = self._inline
1685 1685 iosize = self._io.size
1686 1686 buffer = util.buffer
1687 1687
1688 1688 l = []
1689 1689 ladd = l.append
1690 1690
1691 1691 if not self._withsparseread:
1692 1692 slicedchunks = (revs,)
1693 1693 else:
1694 1694 slicedchunks = deltautil.slicechunk(
1695 1695 self, revs, targetsize=targetsize
1696 1696 )
1697 1697
1698 1698 for revschunk in slicedchunks:
1699 1699 firstrev = revschunk[0]
1700 1700 # Skip trailing revisions with empty diff
1701 1701 for lastrev in revschunk[::-1]:
1702 1702 if length(lastrev) != 0:
1703 1703 break
1704 1704
1705 1705 try:
1706 1706 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1707 1707 except OverflowError:
1708 1708 # issue4215 - we can't cache a run of chunks greater than
1709 1709 # 2G on Windows
1710 1710 return [self._chunk(rev, df=df) for rev in revschunk]
1711 1711
1712 1712 decomp = self.decompress
1713 1713 for rev in revschunk:
1714 1714 chunkstart = start(rev)
1715 1715 if inline:
1716 1716 chunkstart += (rev + 1) * iosize
1717 1717 chunklength = length(rev)
1718 1718 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1719 1719
1720 1720 return l
1721 1721
1722 1722 def _chunkclear(self):
1723 1723 """Clear the raw chunk cache."""
1724 1724 self._chunkcache = (0, b'')
1725 1725
1726 1726 def deltaparent(self, rev):
1727 1727 """return deltaparent of the given revision"""
1728 1728 base = self.index[rev][3]
1729 1729 if base == rev:
1730 1730 return nullrev
1731 1731 elif self._generaldelta:
1732 1732 return base
1733 1733 else:
1734 1734 return rev - 1
1735 1735
1736 1736 def issnapshot(self, rev):
1737 1737 """tells whether rev is a snapshot
1738 1738 """
1739 1739 if not self._sparserevlog:
1740 1740 return self.deltaparent(rev) == nullrev
1741 1741 elif util.safehasattr(self.index, 'issnapshot'):
1742 1742 # directly assign the method to cache the testing and access
1743 1743 self.issnapshot = self.index.issnapshot
1744 1744 return self.issnapshot(rev)
1745 1745 if rev == nullrev:
1746 1746 return True
1747 1747 entry = self.index[rev]
1748 1748 base = entry[3]
1749 1749 if base == rev:
1750 1750 return True
1751 1751 if base == nullrev:
1752 1752 return True
1753 1753 p1 = entry[5]
1754 1754 p2 = entry[6]
1755 1755 if base == p1 or base == p2:
1756 1756 return False
1757 1757 return self.issnapshot(base)
1758 1758
1759 1759 def snapshotdepth(self, rev):
1760 1760 """number of snapshots in the chain before this one"""
1761 1761 if not self.issnapshot(rev):
1762 1762 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1763 1763 return len(self._deltachain(rev)[0]) - 1
1764 1764
1765 1765 def revdiff(self, rev1, rev2):
1766 1766 """return or calculate a delta between two revisions
1767 1767
1768 1768 The delta calculated is in binary form and is intended to be written to
1769 1769 revlog data directly. So this function needs raw revision data.
1770 1770 """
1771 1771 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1772 1772 return bytes(self._chunk(rev2))
1773 1773
1774 1774 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1775 1775
1776 1776 def _processflags(self, text, flags, operation, raw=False):
1777 1777 """deprecated entry point to access flag processors"""
1778 1778 msg = b'_processflag(...) use the specialized variant'
1779 1779 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1780 1780 if raw:
1781 1781 return text, flagutil.processflagsraw(self, text, flags)
1782 1782 elif operation == b'read':
1783 1783 return flagutil.processflagsread(self, text, flags)
1784 1784 else: # write operation
1785 1785 return flagutil.processflagswrite(self, text, flags)
1786 1786
1787 1787 def revision(self, nodeorrev, _df=None, raw=False):
1788 1788 """return an uncompressed revision of a given node or revision
1789 1789 number.
1790 1790
1791 1791 _df - an existing file handle to read from. (internal-only)
1792 1792 raw - an optional argument specifying if the revision data is to be
1793 1793 treated as raw data when applying flag transforms. 'raw' should be set
1794 1794 to True when generating changegroups or in debug commands.
1795 1795 """
1796 1796 if raw:
1797 1797 msg = (
1798 1798 b'revlog.revision(..., raw=True) is deprecated, '
1799 1799 b'use revlog.rawdata(...)'
1800 1800 )
1801 1801 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1802 1802 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
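Given the deprecation above, the two supported spellings are (sketch):

    text = rl.revision(node)    # flag processors applied
    raw = rl.rawdata(node)      # stored bytes; replaces revision(node, raw=True)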
1803 1803
1804 1804 def sidedata(self, nodeorrev, _df=None):
1805 1805 """a map of extra data related to the changeset but not part of the hash
1806 1806
1807 1807 This function currently returns a dictionary. However, a more advanced
1808 1808 mapping object will likely be used in the future for more
1809 1809 efficient/lazy code.
1810 1810 """
1811 1811 return self._revisiondata(nodeorrev, _df)[1]
1812 1812
1813 1813 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1814 1814 # deal with <nodeorrev> argument type
1815 1815 if isinstance(nodeorrev, int):
1816 1816 rev = nodeorrev
1817 1817 node = self.node(rev)
1818 1818 else:
1819 1819 node = nodeorrev
1820 1820 rev = None
1821 1821
1822 1822 # fast path the special `nullid` rev
1823 1823 if node == nullid:
1824 1824 return b"", {}
1825 1825
1826 1826 # ``rawtext`` is the text as stored inside the revlog. Might be the
1827 1827 # revision or might need to be processed to retrieve the revision.
1828 1828 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1829 1829
1830 1830 if raw and validated:
1831 1831 # if we don't want to process the raw text and that raw
1832 1832 # text is cached, we can exit early.
1833 1833 return rawtext, {}
1834 1834 if rev is None:
1835 1835 rev = self.rev(node)
1836 1836 # the revlog's flag for this revision
1837 1837 # (usually alter its state or content)
1838 1838 flags = self.flags(rev)
1839 1839
1840 1840 if validated and flags == REVIDX_DEFAULT_FLAGS:
1841 1841 # no extra flags set, no flag processor runs, text = rawtext
1842 1842 return rawtext, {}
1843 1843
1844 1844 sidedata = {}
1845 1845 if raw:
1846 1846 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1847 1847 text = rawtext
1848 1848 else:
1849 1849 try:
1850 1850 r = flagutil.processflagsread(self, rawtext, flags)
1851 1851 except error.SidedataHashError as exc:
1852 1852 msg = _(b"integrity check failed on %s:%s sidedata key %d")
1853 1853 msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey)
1854 1854 raise error.RevlogError(msg)
1855 1855 text, validatehash, sidedata = r
1856 1856 if validatehash:
1857 1857 self.checkhash(text, node, rev=rev)
1858 1858 if not validated:
1859 1859 self._revisioncache = (node, rev, rawtext)
1860 1860
1861 1861 return text, sidedata
1862 1862
1863 1863 def _rawtext(self, node, rev, _df=None):
1864 1864 """return the possibly unvalidated rawtext for a revision
1865 1865
1866 1866 returns (rev, rawtext, validated)
1867 1867 """
1868 1868
1869 1869 # revision in the cache (could be useful to apply delta)
1870 1870 cachedrev = None
1871 1871 # An intermediate text to apply deltas to
1872 1872 basetext = None
1873 1873
1874 1874 # Check if we have the entry in cache
1875 1875 # The cache entry looks like (node, rev, rawtext)
1876 1876 if self._revisioncache:
1877 1877 if self._revisioncache[0] == node:
1878 1878 return (rev, self._revisioncache[2], True)
1879 1879 cachedrev = self._revisioncache[1]
1880 1880
1881 1881 if rev is None:
1882 1882 rev = self.rev(node)
1883 1883
1884 1884 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1885 1885 if stopped:
1886 1886 basetext = self._revisioncache[2]
1887 1887
1888 1888 # drop cache to save memory, the caller is expected to
1889 1889 # update self._revisioncache after validating the text
1890 1890 self._revisioncache = None
1891 1891
1892 1892 targetsize = None
1893 1893 rawsize = self.index[rev][2]
1894 1894 if 0 <= rawsize:
1895 1895 targetsize = 4 * rawsize
1896 1896
1897 1897 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1898 1898 if basetext is None:
1899 1899 basetext = bytes(bins[0])
1900 1900 bins = bins[1:]
1901 1901
1902 1902 rawtext = mdiff.patches(basetext, bins)
1903 1903 del basetext # let us have a chance to free memory early
1904 1904 return (rev, rawtext, False)
1905 1905
1906 1906 def rawdata(self, nodeorrev, _df=None):
1907 1907 """return an uncompressed raw data of a given node or revision number.
1908 1908
1909 1909 _df - an existing file handle to read from. (internal-only)
1910 1910 """
1911 1911 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1912 1912
1913 1913 def hash(self, text, p1, p2):
1914 1914 """Compute a node hash.
1915 1915
1916 1916 Available as a function so that subclasses can replace the hash
1917 1917 as needed.
1918 1918 """
1919 1919 return storageutil.hashrevisionsha1(text, p1, p2)
1920 1920
1921 1921 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1922 1922 """Check node hash integrity.
1923 1923
1924 1924 Available as a function so that subclasses can extend hash mismatch
1925 1925 behaviors as needed.
1926 1926 """
1927 1927 try:
1928 1928 if p1 is None and p2 is None:
1929 1929 p1, p2 = self.parents(node)
1930 1930 if node != self.hash(text, p1, p2):
1931 1931 # Clear the revision cache on hash failure. The revision cache
1932 1932 # only stores the raw revision and clearing the cache does have
1933 1933 # the side-effect that we won't have a cache hit when the raw
1934 1934 # revision data is accessed. But this case should be rare and
1935 1935 # it is extra work to teach the cache about the hash
1936 1936 # verification state.
1937 1937 if self._revisioncache and self._revisioncache[0] == node:
1938 1938 self._revisioncache = None
1939 1939
1940 1940 revornode = rev
1941 1941 if revornode is None:
1942 1942 revornode = templatefilters.short(hex(node))
1943 1943 raise error.RevlogError(
1944 1944 _(b"integrity check failed on %s:%s")
1945 1945 % (self.indexfile, pycompat.bytestr(revornode))
1946 1946 )
1947 1947 except error.RevlogError:
1948 1948 if self._censorable and storageutil.iscensoredtext(text):
1949 1949 raise error.CensoredNodeError(self.indexfile, node, text)
1950 1950 raise
1951 1951
1952 1952 def _enforceinlinesize(self, tr, fp=None):
1953 1953 """Check if the revlog is too big for inline and convert if so.
1954 1954
1955 1955 This should be called after revisions are added to the revlog. If the
1956 1956 revlog has grown too large to be an inline revlog, it will convert it
1957 1957 to use multiple index and data files.
1958 1958 """
1959 1959 tiprev = len(self) - 1
1960 1960 if (
1961 1961 not self._inline
1962 1962 or (self.start(tiprev) + self.length(tiprev)) < _maxinline
1963 1963 ):
1964 1964 return
1965 1965
1966 1966 trinfo = tr.find(self.indexfile)
1967 1967 if trinfo is None:
1968 1968 raise error.RevlogError(
1969 1969 _(b"%s not found in the transaction") % self.indexfile
1970 1970 )
1971 1971
1972 1972 trindex = trinfo[2]
1973 1973 if trindex is not None:
1974 1974 dataoff = self.start(trindex)
1975 1975 else:
1976 1976 # revlog was stripped at start of transaction, use all leftover data
1977 1977 trindex = len(self) - 1
1978 1978 dataoff = self.end(tiprev)
1979 1979
1980 1980 tr.add(self.datafile, dataoff)
1981 1981
1982 1982 if fp:
1983 1983 fp.flush()
1984 1984 fp.close()
1985 1985 # We can't use the cached file handle after close(). So prevent
1986 1986 # its usage.
1987 1987 self._writinghandles = None
1988 1988
1989 1989 with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
1990 1990 for r in self:
1991 1991 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
1992 1992
1993 1993 with self._indexfp(b'w') as fp:
1994 1994 self.version &= ~FLAG_INLINE_DATA
1995 1995 self._inline = False
1996 1996 io = self._io
1997 1997 for i in self:
1998 1998 e = io.packentry(self.index[i], self.node, self.version, i)
1999 1999 fp.write(e)
2000 2000
2001 2001 # the temp file replaces the real index when we exit the context
2002 2002 # manager
2003 2003
2004 2004 tr.replace(self.indexfile, trindex * self._io.size)
2005 2005 nodemaputil.setup_persistent_nodemap(tr, self)
2006 2006 self._chunkclear()
2007 2007
2008 2008 def _nodeduplicatecallback(self, transaction, node):
2009 2009 """called when trying to add a node already stored.
2010 2010 """
2011 2011
2012 2012 def addrevision(
2013 2013 self,
2014 2014 text,
2015 2015 transaction,
2016 2016 link,
2017 2017 p1,
2018 2018 p2,
2019 2019 cachedelta=None,
2020 2020 node=None,
2021 2021 flags=REVIDX_DEFAULT_FLAGS,
2022 2022 deltacomputer=None,
2023 2023 sidedata=None,
2024 2024 ):
2025 2025 """add a revision to the log
2026 2026
2027 2027 text - the revision data to add
2028 2028 transaction - the transaction object used for rollback
2029 2029 link - the linkrev data to add
2030 2030 p1, p2 - the parent nodeids of the revision
2031 2031 cachedelta - an optional precomputed delta
2032 2032 node - nodeid of revision; typically node is not specified, and it is
2033 2033 computed by default as hash(text, p1, p2), however subclasses might
2034 2034 use a different hashing method (and override checkhash() in that case)
2035 2035 flags - the known flags to set on the revision
2036 2036 deltacomputer - an optional deltacomputer instance shared between
2037 2037 multiple calls
2038 2038 """
2039 2039 if link == nullrev:
2040 2040 raise error.RevlogError(
2041 2041 _(b"attempted to add linkrev -1 to %s") % self.indexfile
2042 2042 )
2043 2043
2044 2044 if sidedata is None:
2045 2045 sidedata = {}
2046 2046 flags = flags & ~REVIDX_SIDEDATA
2047 2047 elif not self.hassidedata:
2048 2048 raise error.ProgrammingError(
2049 2049 _(b"trying to add sidedata to a revlog who don't support them")
2050 2050 )
2051 2051 else:
2052 2052 flags |= REVIDX_SIDEDATA
2053 2053
2054 2054 if flags:
2055 2055 node = node or self.hash(text, p1, p2)
2056 2056
2057 2057 rawtext, validatehash = flagutil.processflagswrite(
2058 2058 self, text, flags, sidedata=sidedata
2059 2059 )
2060 2060
2061 2061 # If the flag processor modifies the revision data, ignore any provided
2062 2062 # cachedelta.
2063 2063 if rawtext != text:
2064 2064 cachedelta = None
2065 2065
2066 2066 if len(rawtext) > _maxentrysize:
2067 2067 raise error.RevlogError(
2068 2068 _(
2069 2069 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2070 2070 )
2071 2071 % (self.indexfile, len(rawtext))
2072 2072 )
2073 2073
2074 2074 node = node or self.hash(rawtext, p1, p2)
2075 2075 if self.index.has_node(node):
2076 2076 return node
2077 2077
2078 2078 if validatehash:
2079 2079 self.checkhash(rawtext, node, p1=p1, p2=p2)
2080 2080
2081 2081 return self.addrawrevision(
2082 2082 rawtext,
2083 2083 transaction,
2084 2084 link,
2085 2085 p1,
2086 2086 p2,
2087 2087 node,
2088 2088 flags,
2089 2089 cachedelta=cachedelta,
2090 2090 deltacomputer=deltacomputer,
2091 2091 )
2092 2092
2093 2093 def addrawrevision(
2094 2094 self,
2095 2095 rawtext,
2096 2096 transaction,
2097 2097 link,
2098 2098 p1,
2099 2099 p2,
2100 2100 node,
2101 2101 flags,
2102 2102 cachedelta=None,
2103 2103 deltacomputer=None,
2104 2104 ):
2105 2105 """add a raw revision with known flags, node and parents
2106 2106 useful when reusing a revision not stored in this revlog (ex: received
2107 2107 over wire, or read from an external bundle).
2108 2108 """
2109 2109 dfh = None
2110 2110 if not self._inline:
2111 2111 dfh = self._datafp(b"a+")
2112 2112 ifh = self._indexfp(b"a+")
2113 2113 try:
2114 2114 return self._addrevision(
2115 2115 node,
2116 2116 rawtext,
2117 2117 transaction,
2118 2118 link,
2119 2119 p1,
2120 2120 p2,
2121 2121 flags,
2122 2122 cachedelta,
2123 2123 ifh,
2124 2124 dfh,
2125 2125 deltacomputer=deltacomputer,
2126 2126 )
2127 2127 finally:
2128 2128 if dfh:
2129 2129 dfh.close()
2130 2130 ifh.close()
2131 2131
2132 2132 def compress(self, data):
2133 2133 """Generate a possibly-compressed representation of data."""
2134 2134 if not data:
2135 2135 return b'', data
2136 2136
2137 2137 compressed = self._compressor.compress(data)
2138 2138
2139 2139 if compressed:
2140 2140 # The revlog compressor added the header in the returned data.
2141 2141 return b'', compressed
2142 2142
2143 2143 if data[0:1] == b'\0':
2144 2144 return b'', data
2145 2145 return b'u', data
2146 2146
2147 2147 def decompress(self, data):
2148 2148 """Decompress a revlog chunk.
2149 2149
2150 2150 The chunk is expected to begin with a header identifying the
2151 2151 format type so it can be routed to an appropriate decompressor.
2152 2152 """
2153 2153 if not data:
2154 2154 return data
2155 2155
2156 2156 # Revlogs are read much more frequently than they are written and many
2157 2157 # chunks only take microseconds to decompress, so performance is
2158 2158 # important here.
2159 2159 #
2160 2160 # We can make a few assumptions about revlogs:
2161 2161 #
2162 2162 # 1) the majority of chunks will be compressed (as opposed to inline
2163 2163 # raw data).
2164 2164 # 2) decompressing *any* data will likely be at least 10x slower than
2165 2165 # returning raw inline data.
2166 2166 # 3) we want to prioritize common and officially supported compression
2167 2167 # engines
2168 2168 #
2169 2169 # It follows that we want to optimize for "decompress compressed data
2170 2170 # when encoded with common and officially supported compression engines"
2171 2171 # case over "raw data" and "data encoded by less common or non-official
2172 2172 # compression engines." That is why we have the inline lookup first
2173 2173 # followed by the compengines lookup.
2174 2174 #
2175 2175 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2176 2176 # compressed chunks. And this matters for changelog and manifest reads.
2177 2177 t = data[0:1]
2178 2178
2179 2179 if t == b'x':
2180 2180 try:
2181 2181 return _zlibdecompress(data)
2182 2182 except zlib.error as e:
2183 2183 raise error.RevlogError(
2184 2184 _(b'revlog decompress error: %s')
2185 2185 % stringutil.forcebytestr(e)
2186 2186 )
2187 2187 # '\0' is more common than 'u' so it goes first.
2188 2188 elif t == b'\0':
2189 2189 return data
2190 2190 elif t == b'u':
2191 2191 return util.buffer(data, 1)
2192 2192
2193 2193 try:
2194 2194 compressor = self._decompressors[t]
2195 2195 except KeyError:
2196 2196 try:
2197 2197 engine = util.compengines.forrevlogheader(t)
2198 2198 compressor = engine.revlogcompressor(self._compengineopts)
2199 2199 self._decompressors[t] = compressor
2200 2200 except KeyError:
2201 2201 raise error.RevlogError(_(b'unknown compression type %r') % t)
2202 2202
2203 2203 return compressor.decompress(data)
2204 2204
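As a minimal, hedged sketch of the header dispatch implemented by compress()/decompress() above (the fallback to registered compression engines such as zstd is omitted, and decompress_sketch is a hypothetical name):

import zlib

def decompress_sketch(data):
    if not data:
        return data
    t = data[0:1]
    if t == b'x':     # zlib streams start with 0x78, i.e. 'x'
        return zlib.decompress(data)
    elif t == b'\0':  # raw chunk that happens to start with a NUL byte
        return data
    elif t == b'u':   # stored uncompressed, strip the one-byte marker
        return data[1:]
    raise ValueError('unknown compression header: %r' % t)

raw = b'some revision text ' * 50
assert decompress_sketch(zlib.compress(raw)) == raw
assert decompress_sketch(b'u' + raw) == raw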
2205 2205 def _addrevision(
2206 2206 self,
2207 2207 node,
2208 2208 rawtext,
2209 2209 transaction,
2210 2210 link,
2211 2211 p1,
2212 2212 p2,
2213 2213 flags,
2214 2214 cachedelta,
2215 2215 ifh,
2216 2216 dfh,
2217 2217 alwayscache=False,
2218 2218 deltacomputer=None,
2219 2219 ):
2220 2220 """internal function to add revisions to the log
2221 2221
2222 2222 see addrevision for argument descriptions.
2223 2223
2224 2224 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2225 2225
2226 2226 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2227 2227 be used.
2228 2228
2229 2229 invariants:
2230 2230 - rawtext is optional (can be None); if not set, cachedelta must be set.
2231 2231 if both are set, they must correspond to each other.
2232 2232 """
2233 2233 if node == nullid:
2234 2234 raise error.RevlogError(
2235 2235 _(b"%s: attempt to add null revision") % self.indexfile
2236 2236 )
2237 2237 if node == wdirid or node in wdirfilenodeids:
2238 2238 raise error.RevlogError(
2239 2239 _(b"%s: attempt to add wdir revision") % self.indexfile
2240 2240 )
2241 2241
2242 2242 if self._inline:
2243 2243 fh = ifh
2244 2244 else:
2245 2245 fh = dfh
2246 2246
2247 2247 btext = [rawtext]
2248 2248
2249 2249 curr = len(self)
2250 2250 prev = curr - 1
2251 2251 offset = self.end(prev)
2252 2252 p1r, p2r = self.rev(p1), self.rev(p2)
2253 2253
2254 2254 # full versions are inserted when the needed deltas
2255 2255 # become comparable to the uncompressed text
2256 2256 if rawtext is None:
2257 2257 # need rawtext size, before changed by flag processors, which is
2258 2258 # the non-raw size. use revlog explicitly to avoid filelog's extra
2259 2259 # logic that might remove metadata size.
2260 2260 textlen = mdiff.patchedsize(
2261 2261 revlog.size(self, cachedelta[0]), cachedelta[1]
2262 2262 )
2263 2263 else:
2264 2264 textlen = len(rawtext)
2265 2265
2266 2266 if deltacomputer is None:
2267 2267 deltacomputer = deltautil.deltacomputer(self)
2268 2268
2269 2269 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2270 2270
2271 2271 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2272 2272
2273 2273 e = (
2274 2274 offset_type(offset, flags),
2275 2275 deltainfo.deltalen,
2276 2276 textlen,
2277 2277 deltainfo.base,
2278 2278 link,
2279 2279 p1r,
2280 2280 p2r,
2281 2281 node,
2282 2282 )
2283 2283 self.index.append(e)
2284 2284
2285 2285 entry = self._io.packentry(e, self.node, self.version, curr)
2286 2286 self._writeentry(
2287 2287 transaction, ifh, dfh, entry, deltainfo.data, link, offset
2288 2288 )
2289 2289
2290 2290 rawtext = btext[0]
2291 2291
2292 2292 if alwayscache and rawtext is None:
2293 2293 rawtext = deltacomputer.buildtext(revinfo, fh)
2294 2294
2295 2295 if type(rawtext) == bytes: # only accept immutable objects
2296 2296 self._revisioncache = (node, curr, rawtext)
2297 2297 self._chainbasecache[curr] = deltainfo.chainbase
2298 2298 return node
2299 2299
2300 2300 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2301 2301 # Files opened in a+ mode have inconsistent behavior on various
2302 2302 # platforms. Windows requires that a file positioning call be made
2303 2303 # when the file handle transitions between reads and writes. See
2304 2304 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2305 2305 # platforms, Python or the platform itself can be buggy. Some versions
2306 2306 # of Solaris have been observed to not append at the end of the file
2307 2307 # if the file was seeked to before the end. See issue4943 for more.
2308 2308 #
2309 2309 # We work around this issue by inserting a seek() before writing.
2310 2310 # Note: This is likely not necessary on Python 3. However, because
2311 2311 # the file handle is reused for reads and may be seeked there, we need
2312 2312 # to be careful before changing this.
2313 2313 ifh.seek(0, os.SEEK_END)
2314 2314 if dfh:
2315 2315 dfh.seek(0, os.SEEK_END)
2316 2316
2317 2317 curr = len(self) - 1
2318 2318 if not self._inline:
2319 2319 transaction.add(self.datafile, offset)
2320 2320 transaction.add(self.indexfile, curr * len(entry))
2321 2321 if data[0]:
2322 2322 dfh.write(data[0])
2323 2323 dfh.write(data[1])
2324 2324 ifh.write(entry)
2325 2325 else:
2326 2326 offset += curr * self._io.size
2327 2327 transaction.add(self.indexfile, offset, curr)
2328 2328 ifh.write(entry)
2329 2329 ifh.write(data[0])
2330 2330 ifh.write(data[1])
2331 2331 self._enforceinlinesize(transaction, ifh)
2332 2332 nodemaputil.setup_persistent_nodemap(transaction, self)
2333 2333
2334 2334 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2335 2335 """
2336 2336 add a delta group
2337 2337
2338 2338 given a set of deltas, add them to the revision log. the
2339 2339 first delta is against its parent, which should be in our
2340 2340 log, the rest are against the previous delta.
2341 2341
2342 2342 If ``addrevisioncb`` is defined, it will be called with arguments of
2343 2343 this revlog and the node that was added.
2344 2344 """
2345 2345
2346 2346 if self._writinghandles:
2347 2347 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2348 2348
2349 2349 nodes = []
2350 2350
2351 2351 r = len(self)
2352 2352 end = 0
2353 2353 if r:
2354 2354 end = self.end(r - 1)
2355 2355 ifh = self._indexfp(b"a+")
2356 2356 isize = r * self._io.size
2357 2357 if self._inline:
2358 2358 transaction.add(self.indexfile, end + isize, r)
2359 2359 dfh = None
2360 2360 else:
2361 2361 transaction.add(self.indexfile, isize, r)
2362 2362 transaction.add(self.datafile, end)
2363 2363 dfh = self._datafp(b"a+")
2364 2364
2365 2365 def flush():
2366 2366 if dfh:
2367 2367 dfh.flush()
2368 2368 ifh.flush()
2369 2369
2370 2370 self._writinghandles = (ifh, dfh)
2371 2371
2372 2372 try:
2373 2373 deltacomputer = deltautil.deltacomputer(self)
2374 2374 # loop through our set of deltas
2375 2375 for data in deltas:
2376 2376 node, p1, p2, linknode, deltabase, delta, flags = data
2377 2377 link = linkmapper(linknode)
2378 2378 flags = flags or REVIDX_DEFAULT_FLAGS
2379 2379
2380 2380 nodes.append(node)
2381 2381
2382 2382 if self.index.has_node(node):
2383 2383 self._nodeduplicatecallback(transaction, node)
2384 2384 # this can happen if two branches make the same change
2385 2385 continue
2386 2386
2387 2387 for p in (p1, p2):
2388 2388 if not self.index.has_node(p):
2389 2389 raise error.LookupError(
2390 2390 p, self.indexfile, _(b'unknown parent')
2391 2391 )
2392 2392
2393 2393 if not self.index.has_node(deltabase):
2394 2394 raise error.LookupError(
2395 2395 deltabase, self.indexfile, _(b'unknown delta base')
2396 2396 )
2397 2397
2398 2398 baserev = self.rev(deltabase)
2399 2399
2400 2400 if baserev != nullrev and self.iscensored(baserev):
2401 2401 # if base is censored, delta must be full replacement in a
2402 2402 # single patch operation
2403 2403 hlen = struct.calcsize(b">lll")
2404 2404 oldlen = self.rawsize(baserev)
2405 2405 newlen = len(delta) - hlen
2406 2406 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2407 2407 raise error.CensoredBaseError(
2408 2408 self.indexfile, self.node(baserev)
2409 2409 )
2410 2410
2411 2411 if not flags and self._peek_iscensored(baserev, delta, flush):
2412 2412 flags |= REVIDX_ISCENSORED
2413 2413
2414 2414 # We assume consumers of addrevisioncb will want to retrieve
2415 2415 # the added revision, which will require a call to
2416 2416 # revision(). revision() will fast path if there is a cache
2417 2417 # hit. So, we tell _addrevision() to always cache in this case.
2418 2418 # We're only using addgroup() in the context of changegroup
2419 2419 # generation so the revision data can always be handled as raw
2420 2420 # by the flagprocessor.
2421 2421 self._addrevision(
2422 2422 node,
2423 2423 None,
2424 2424 transaction,
2425 2425 link,
2426 2426 p1,
2427 2427 p2,
2428 2428 flags,
2429 2429 (baserev, delta),
2430 2430 ifh,
2431 2431 dfh,
2432 2432 alwayscache=bool(addrevisioncb),
2433 2433 deltacomputer=deltacomputer,
2434 2434 )
2435 2435
2436 2436 if addrevisioncb:
2437 2437 addrevisioncb(self, node)
2438 2438
2439 2439 if not dfh and not self._inline:
2440 2440 # addrevision switched from inline to conventional
2441 2441 # reopen the index
2442 2442 ifh.close()
2443 2443 dfh = self._datafp(b"a+")
2444 2444 ifh = self._indexfp(b"a+")
2445 2445 self._writinghandles = (ifh, dfh)
2446 2446 finally:
2447 2447 self._writinghandles = None
2448 2448
2449 2449 if dfh:
2450 2450 dfh.close()
2451 2451 ifh.close()
2452 2452
2453 2453 return nodes
2454 2454
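For clarity, each item taken from ``deltas`` above is a 7-tuple in the order unpacked by the loop. A hedged sketch of how a caller could shape one entry (the helper name and all values are hypothetical; real callers such as the changegroup code derive them from the incoming stream):

def make_delta_entry(node, p1, p2, linknode, deltabase, delta, flags=0):
    # one revision, described as a binary delta against ``deltabase``
    return (node, p1, p2, linknode, deltabase, delta, flags)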
2455 2455 def iscensored(self, rev):
2456 2456 """Check if a file revision is censored."""
2457 2457 if not self._censorable:
2458 2458 return False
2459 2459
2460 2460 return self.flags(rev) & REVIDX_ISCENSORED
2461 2461
2462 2462 def _peek_iscensored(self, baserev, delta, flush):
2463 2463 """Quickly check if a delta produces a censored revision."""
2464 2464 if not self._censorable:
2465 2465 return False
2466 2466
2467 2467 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2468 2468
2469 2469 def getstrippoint(self, minlink):
2470 2470 """find the minimum rev that must be stripped to strip the linkrev
2471 2471
2472 2472 Returns a tuple containing the minimum rev and a set of all revs that
2473 2473 have linkrevs that will be broken by this strip.
2474 2474 """
2475 2475 return storageutil.resolvestripinfo(
2476 2476 minlink,
2477 2477 len(self) - 1,
2478 2478 self.headrevs(),
2479 2479 self.linkrev,
2480 2480 self.parentrevs,
2481 2481 )
2482 2482
2483 2483 def strip(self, minlink, transaction):
2484 2484 """truncate the revlog on the first revision with a linkrev >= minlink
2485 2485
2486 2486 This function is called when we're stripping revision minlink and
2487 2487 its descendants from the repository.
2488 2488
2489 2489 We have to remove all revisions with linkrev >= minlink, because
2490 2490 the equivalent changelog revisions will be renumbered after the
2491 2491 strip.
2492 2492
2493 2493 So we truncate the revlog on the first of these revisions, and
2494 2494 trust that the caller has saved the revisions that shouldn't be
2495 2495 removed and that it'll re-add them after this truncation.
2496 2496 """
2497 2497 if len(self) == 0:
2498 2498 return
2499 2499
2500 2500 rev, _ = self.getstrippoint(minlink)
2501 2501 if rev == len(self):
2502 2502 return
2503 2503
2504 2504 # first truncate the files on disk
2505 2505 end = self.start(rev)
2506 2506 if not self._inline:
2507 2507 transaction.add(self.datafile, end)
2508 2508 end = rev * self._io.size
2509 2509 else:
2510 2510 end += rev * self._io.size
2511 2511
2512 2512 transaction.add(self.indexfile, end)
2513 2513
2514 2514 # then reset internal state in memory to forget those revisions
2515 2515 self._revisioncache = None
2516 2516 self._chaininfocache = {}
2517 2517 self._chunkclear()
2518 2518
2519 2519 del self.index[rev:-1]
2520 2520
2521 2521 def checksize(self):
2522 2522 """Check size of index and data files
2523 2523
2524 2524 return a (dd, di) tuple.
2525 2525 - dd: extra bytes for the "data" file
2526 2526 - di: extra bytes for the "index" file
2527 2527
2528 2528 A healthy revlog will return (0, 0).
2529 2529 """
2530 2530 expected = 0
2531 2531 if len(self):
2532 2532 expected = max(0, self.end(len(self) - 1))
2533 2533
2534 2534 try:
2535 2535 with self._datafp() as f:
2536 2536 f.seek(0, io.SEEK_END)
2537 2537 actual = f.tell()
2538 2538 dd = actual - expected
2539 2539 except IOError as inst:
2540 2540 if inst.errno != errno.ENOENT:
2541 2541 raise
2542 2542 dd = 0
2543 2543
2544 2544 try:
2545 2545 f = self.opener(self.indexfile)
2546 2546 f.seek(0, io.SEEK_END)
2547 2547 actual = f.tell()
2548 2548 f.close()
2549 2549 s = self._io.size
2550 2550 i = max(0, actual // s)
2551 2551 di = actual - (i * s)
2552 2552 if self._inline:
2553 2553 databytes = 0
2554 2554 for r in self:
2555 2555 databytes += max(0, self.length(r))
2556 2556 dd = 0
2557 2557 di = actual - len(self) * s - databytes
2558 2558 except IOError as inst:
2559 2559 if inst.errno != errno.ENOENT:
2560 2560 raise
2561 2561 di = 0
2562 2562
2563 2563 return (dd, di)
2564 2564
2565 2565 def files(self):
2566 2566 res = [self.indexfile]
2567 2567 if not self._inline:
2568 2568 res.append(self.datafile)
2569 2569 return res
2570 2570
2571 2571 def emitrevisions(
2572 2572 self,
2573 2573 nodes,
2574 2574 nodesorder=None,
2575 2575 revisiondata=False,
2576 2576 assumehaveparentrevisions=False,
2577 2577 deltamode=repository.CG_DELTAMODE_STD,
2578 2578 ):
2579 2579 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2580 2580 raise error.ProgrammingError(
2581 2581 b'unhandled value for nodesorder: %s' % nodesorder
2582 2582 )
2583 2583
2584 2584 if nodesorder is None and not self._generaldelta:
2585 2585 nodesorder = b'storage'
2586 2586
2587 2587 if (
2588 2588 not self._storedeltachains
2589 2589 and deltamode != repository.CG_DELTAMODE_PREV
2590 2590 ):
2591 2591 deltamode = repository.CG_DELTAMODE_FULL
2592 2592
2593 2593 return storageutil.emitrevisions(
2594 2594 self,
2595 2595 nodes,
2596 2596 nodesorder,
2597 2597 revlogrevisiondelta,
2598 2598 deltaparentfn=self.deltaparent,
2599 2599 candeltafn=self.candelta,
2600 2600 rawsizefn=self.rawsize,
2601 2601 revdifffn=self.revdiff,
2602 2602 flagsfn=self.flags,
2603 2603 deltamode=deltamode,
2604 2604 revisiondata=revisiondata,
2605 2605 assumehaveparentrevisions=assumehaveparentrevisions,
2606 2606 )
2607 2607
2608 2608 DELTAREUSEALWAYS = b'always'
2609 2609 DELTAREUSESAMEREVS = b'samerevs'
2610 2610 DELTAREUSENEVER = b'never'
2611 2611
2612 2612 DELTAREUSEFULLADD = b'fulladd'
2613 2613
2614 2614 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2615 2615
2616 2616 def clone(
2617 2617 self,
2618 2618 tr,
2619 2619 destrevlog,
2620 2620 addrevisioncb=None,
2621 2621 deltareuse=DELTAREUSESAMEREVS,
2622 2622 forcedeltabothparents=None,
2623 2623 sidedatacompanion=None,
2624 2624 ):
2625 2625 """Copy this revlog to another, possibly with format changes.
2626 2626
2627 2627 The destination revlog will contain the same revisions and nodes.
2628 2628 However, it may not be bit-for-bit identical due to e.g. delta encoding
2629 2629 differences.
2630 2630
2631 2631 The ``deltareuse`` argument controls how deltas from the existing revlog
2632 2632 are preserved in the destination revlog. The argument can have the
2633 2633 following values:
2634 2634
2635 2635 DELTAREUSEALWAYS
2636 2636 Deltas will always be reused (if possible), even if the destination
2637 2637 revlog would not select the same revisions for the delta. This is the
2638 2638 fastest mode of operation.
2639 2639 DELTAREUSESAMEREVS
2640 2640 Deltas will be reused if the destination revlog would pick the same
2641 2641 revisions for the delta. This mode strikes a balance between speed
2642 2642 and optimization.
2643 2643 DELTAREUSENEVER
2644 2644 Deltas will never be reused. This is the slowest mode of execution.
2645 2645 This mode can be used to recompute deltas (e.g. if the diff/delta
2646 2646 algorithm changes).
2647 2647 DELTAREUSEFULLADD
2648 2648 Revisions will be re-added as if they were new content. This is
2649 2649 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2650 2650 e.g. large file detection and handling.
2651 2651
2652 2652 Delta computation can be slow, so the choice of delta reuse policy can
2653 2653 significantly affect run time.
2654 2654
2655 2655 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2656 2656 two extremes. Deltas will be reused if they are appropriate. But if the
2657 2657 delta could choose a better revision, it will do so. This means if you
2658 2658 are converting a non-generaldelta revlog to a generaldelta revlog,
2659 2659 deltas will be recomputed if the delta's parent isn't a parent of the
2660 2660 revision.
2661 2661
2662 2662 In addition to the delta policy, the ``forcedeltabothparents``
2663 2663 argument controls whether to force compute deltas against both parents
2664 2664 for merges. When unset, the destination revlog's existing setting is kept.
2665 2665
2666 2666 If not None, the `sidedatacompanion` is a callable that accepts two
2667 2667 arguments:
2668 2668
2669 2669 (srcrevlog, rev)
2670 2670
2671 2671 and returns a triplet that controls changes to sidedata content from the
2672 2672 old revision to the new clone result:
2673 2673
2674 2674 (dropall, filterout, update)
2675 2675
2676 2676 * if `dropall` is True, all sidedata should be dropped
2677 2677 * `filterout` is a set of sidedata keys that should be dropped
2678 2678 * `update` is a mapping of additional/new key -> value
2679 2679 """
2680 2680 if deltareuse not in self.DELTAREUSEALL:
2681 2681 raise ValueError(
2682 2682 _(b'value for deltareuse invalid: %s') % deltareuse
2683 2683 )
2684 2684
2685 2685 if len(destrevlog):
2686 2686 raise ValueError(_(b'destination revlog is not empty'))
2687 2687
2688 2688 if getattr(self, 'filteredrevs', None):
2689 2689 raise ValueError(_(b'source revlog has filtered revisions'))
2690 2690 if getattr(destrevlog, 'filteredrevs', None):
2691 2691 raise ValueError(_(b'destination revlog has filtered revisions'))
2692 2692
2693 2693 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2694 2694 # if possible.
2695 2695 oldlazydelta = destrevlog._lazydelta
2696 2696 oldlazydeltabase = destrevlog._lazydeltabase
2697 2697 oldamd = destrevlog._deltabothparents
2698 2698
2699 2699 try:
2700 2700 if deltareuse == self.DELTAREUSEALWAYS:
2701 2701 destrevlog._lazydeltabase = True
2702 2702 destrevlog._lazydelta = True
2703 2703 elif deltareuse == self.DELTAREUSESAMEREVS:
2704 2704 destrevlog._lazydeltabase = False
2705 2705 destrevlog._lazydelta = True
2706 2706 elif deltareuse == self.DELTAREUSENEVER:
2707 2707 destrevlog._lazydeltabase = False
2708 2708 destrevlog._lazydelta = False
2709 2709
2710 2710 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2711 2711
2712 2712 self._clone(
2713 2713 tr,
2714 2714 destrevlog,
2715 2715 addrevisioncb,
2716 2716 deltareuse,
2717 2717 forcedeltabothparents,
2718 2718 sidedatacompanion,
2719 2719 )
2720 2720
2721 2721 finally:
2722 2722 destrevlog._lazydelta = oldlazydelta
2723 2723 destrevlog._lazydeltabase = oldlazydeltabase
2724 2724 destrevlog._deltabothparents = oldamd
2725 2725
2726 2726 def _clone(
2727 2727 self,
2728 2728 tr,
2729 2729 destrevlog,
2730 2730 addrevisioncb,
2731 2731 deltareuse,
2732 2732 forcedeltabothparents,
2733 2733 sidedatacompanion,
2734 2734 ):
2735 2735 """perform the core duty of `revlog.clone` after parameter processing"""
2736 2736 deltacomputer = deltautil.deltacomputer(destrevlog)
2737 2737 index = self.index
2738 2738 for rev in self:
2739 2739 entry = index[rev]
2740 2740
2741 2741 # Some classes override linkrev to take filtered revs into
2742 2742 # account. Use raw entry from index.
2743 2743 flags = entry[0] & 0xFFFF
2744 2744 linkrev = entry[4]
2745 2745 p1 = index[entry[5]][7]
2746 2746 p2 = index[entry[6]][7]
2747 2747 node = entry[7]
2748 2748
2749 2749 sidedataactions = (False, [], {})
2750 2750 if sidedatacompanion is not None:
2751 2751 sidedataactions = sidedatacompanion(self, rev)
2752 2752
2753 2753 # (Possibly) reuse the delta from the revlog if allowed and
2754 2754 # the revlog chunk is a delta.
2755 2755 cachedelta = None
2756 2756 rawtext = None
2757 2757 if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
2758 2758 dropall, filterout, update = sidedataactions
2759 2759 text, sidedata = self._revisiondata(rev)
2760 2760 if dropall:
2761 2761 sidedata = {}
2762 2762 for key in filterout:
2763 2763 sidedata.pop(key, None)
2764 2764 sidedata.update(update)
2765 2765 if not sidedata:
2766 2766 sidedata = None
2767 2767 destrevlog.addrevision(
2768 2768 text,
2769 2769 tr,
2770 2770 linkrev,
2771 2771 p1,
2772 2772 p2,
2773 2773 cachedelta=cachedelta,
2774 2774 node=node,
2775 2775 flags=flags,
2776 2776 deltacomputer=deltacomputer,
2777 2777 sidedata=sidedata,
2778 2778 )
2779 2779 else:
2780 2780 if destrevlog._lazydelta:
2781 2781 dp = self.deltaparent(rev)
2782 2782 if dp != nullrev:
2783 2783 cachedelta = (dp, bytes(self._chunk(rev)))
2784 2784
2785 2785 if not cachedelta:
2786 2786 rawtext = self.rawdata(rev)
2787 2787
2788 2788 ifh = destrevlog.opener(
2789 2789 destrevlog.indexfile, b'a+', checkambig=False
2790 2790 )
2791 2791 dfh = None
2792 2792 if not destrevlog._inline:
2793 2793 dfh = destrevlog.opener(destrevlog.datafile, b'a+')
2794 2794 try:
2795 2795 destrevlog._addrevision(
2796 2796 node,
2797 2797 rawtext,
2798 2798 tr,
2799 2799 linkrev,
2800 2800 p1,
2801 2801 p2,
2802 2802 flags,
2803 2803 cachedelta,
2804 2804 ifh,
2805 2805 dfh,
2806 2806 deltacomputer=deltacomputer,
2807 2807 )
2808 2808 finally:
2809 2809 if dfh:
2810 2810 dfh.close()
2811 2811 ifh.close()
2812 2812
2813 2813 if addrevisioncb:
2814 2814 addrevisioncb(self, rev, node)
2815 2815
2816 2816 def censorrevision(self, tr, censornode, tombstone=b''):
2817 2817 if (self.version & 0xFFFF) == REVLOGV0:
2818 2818 raise error.RevlogError(
2819 2819 _(b'cannot censor with version %d revlogs') % self.version
2820 2820 )
2821 2821
2822 2822 censorrev = self.rev(censornode)
2823 2823 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2824 2824
2825 2825 if len(tombstone) > self.rawsize(censorrev):
2826 2826 raise error.Abort(
2827 2827 _(b'censor tombstone must be no longer than censored data')
2828 2828 )
2829 2829
2830 2830 # Rewriting the revlog in place is hard. Our strategy for censoring is
2831 2831 # to create a new revlog, copy all revisions to it, then replace the
2832 2832 # revlogs on transaction close.
2833 2833
2834 2834 newindexfile = self.indexfile + b'.tmpcensored'
2835 2835 newdatafile = self.datafile + b'.tmpcensored'
2836 2836
2837 2837 # This is a bit dangerous. We could easily have a mismatch of state.
2838 2838 newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
2839 2839 newrl.version = self.version
2840 2840 newrl._generaldelta = self._generaldelta
2841 2841 newrl._io = self._io
2842 2842
2843 2843 for rev in self.revs():
2844 2844 node = self.node(rev)
2845 2845 p1, p2 = self.parents(node)
2846 2846
2847 2847 if rev == censorrev:
2848 2848 newrl.addrawrevision(
2849 2849 tombstone,
2850 2850 tr,
2851 2851 self.linkrev(censorrev),
2852 2852 p1,
2853 2853 p2,
2854 2854 censornode,
2855 2855 REVIDX_ISCENSORED,
2856 2856 )
2857 2857
2858 2858 if newrl.deltaparent(rev) != nullrev:
2859 2859 raise error.Abort(
2860 2860 _(
2861 2861 b'censored revision stored as delta; '
2862 2862 b'cannot censor'
2863 2863 ),
2864 2864 hint=_(
2865 2865 b'censoring of revlogs is not '
2866 2866 b'fully implemented; please report '
2867 2867 b'this bug'
2868 2868 ),
2869 2869 )
2870 2870 continue
2871 2871
2872 2872 if self.iscensored(rev):
2873 2873 if self.deltaparent(rev) != nullrev:
2874 2874 raise error.Abort(
2875 2875 _(
2876 2876 b'cannot censor due to censored '
2877 2877 b'revision having delta stored'
2878 2878 )
2879 2879 )
2880 2880 rawtext = self._chunk(rev)
2881 2881 else:
2882 2882 rawtext = self.rawdata(rev)
2883 2883
2884 2884 newrl.addrawrevision(
2885 2885 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
2886 2886 )
2887 2887
2888 2888 tr.addbackup(self.indexfile, location=b'store')
2889 2889 if not self._inline:
2890 2890 tr.addbackup(self.datafile, location=b'store')
2891 2891
2892 2892 self.opener.rename(newrl.indexfile, self.indexfile)
2893 2893 if not self._inline:
2894 2894 self.opener.rename(newrl.datafile, self.datafile)
2895 2895
2896 2896 self.clearcaches()
2897 2897 self._loadindex()
2898 2898
2899 2899 def verifyintegrity(self, state):
2900 2900 """Verifies the integrity of the revlog.
2901 2901
2902 2902 Yields ``revlogproblem`` instances describing problems that are
2903 2903 found.
2904 2904 """
2905 2905 dd, di = self.checksize()
2906 2906 if dd:
2907 2907 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
2908 2908 if di:
2909 2909 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
2910 2910
2911 2911 version = self.version & 0xFFFF
2912 2912
2913 2913 # The verifier tells us what version revlog we should be.
2914 2914 if version != state[b'expectedversion']:
2915 2915 yield revlogproblem(
2916 2916 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
2917 2917 % (self.indexfile, version, state[b'expectedversion'])
2918 2918 )
2919 2919
2920 2920 state[b'skipread'] = set()
2921 2921 state[b'safe_renamed'] = set()
2922 2922
2923 2923 for rev in self:
2924 2924 node = self.node(rev)
2925 2925
2926 2926 # Verify contents. 4 cases to care about:
2927 2927 #
2928 2928 # common: the most common case
2929 2929 # rename: with a rename
2930 2930 # meta: file content starts with b'\1\n', the metadata
2931 2931 # header defined in filelog.py, but without a rename
2932 2932 # ext: content stored externally
2933 2933 #
2934 2934 # More formally, their differences are shown below:
2935 2935 #
2936 2936 # | common | rename | meta | ext
2937 2937 # -------------------------------------------------------
2938 2938 # flags() | 0 | 0 | 0 | not 0
2939 2939 # renamed() | False | True | False | ?
2940 2940 # rawtext[0:2]=='\1\n'| False | True | True | ?
2941 2941 #
2942 2942 # "rawtext" means the raw text stored in revlog data, which
2943 2943 # could be retrieved by "rawdata(rev)". "text"
2944 2944 # mentioned below is "revision(rev)".
2945 2945 #
2946 2946 # There are 3 different lengths stored physically:
2947 2947 # 1. L1: rawsize, stored in revlog index
2948 2948 # 2. L2: len(rawtext), stored in revlog data
2949 2949 # 3. L3: len(text), stored in revlog data if flags==0, or
2950 2950 # possibly somewhere else if flags!=0
2951 2951 #
2952 2952 # L1 should be equal to L2. L3 could be different from them.
2953 2953 # "text" may or may not affect commit hash depending on flag
2954 2954 # processors (see flagutil.addflagprocessor).
2955 2955 #
2956 2956 # | common | rename | meta | ext
2957 2957 # -------------------------------------------------
2958 2958 # rawsize() | L1 | L1 | L1 | L1
2959 2959 # size() | L1 | L2-LM | L1(*) | L1 (?)
2960 2960 # len(rawtext) | L2 | L2 | L2 | L2
2961 2961 # len(text) | L2 | L2 | L2 | L3
2962 2962 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2963 2963 #
2964 2964 # LM: length of metadata, depending on rawtext
2965 2965 # (*): not ideal, see comment in filelog.size
2966 2966 # (?): could be "- len(meta)" if the resolved content has
2967 2967 # rename metadata
2968 2968 #
2969 2969 # Checks needed to be done:
2970 2970 # 1. length check: L1 == L2, in all cases.
2971 2971 # 2. hash check: depending on flag processor, we may need to
2972 2972 # use either "text" (external), or "rawtext" (in revlog).
2973 2973
2974 2974 try:
2975 2975 skipflags = state.get(b'skipflags', 0)
2976 2976 if skipflags:
2977 2977 skipflags &= self.flags(rev)
2978 2978
2979 2979 _verify_revision(self, skipflags, state, node)
2980 2980
2981 2981 l1 = self.rawsize(rev)
2982 2982 l2 = len(self.rawdata(node))
2983 2983
2984 2984 if l1 != l2:
2985 2985 yield revlogproblem(
2986 2986 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
2987 2987 node=node,
2988 2988 )
2989 2989
2990 2990 except error.CensoredNodeError:
2991 2991 if state[b'erroroncensored']:
2992 2992 yield revlogproblem(
2993 2993 error=_(b'censored file data'), node=node
2994 2994 )
2995 2995 state[b'skipread'].add(node)
2996 2996 except Exception as e:
2997 2997 yield revlogproblem(
2998 2998 error=_(b'unpacking %s: %s')
2999 2999 % (short(node), stringutil.forcebytestr(e)),
3000 3000 node=node,
3001 3001 )
3002 3002 state[b'skipread'].add(node)
3003 3003
3004 3004 def storageinfo(
3005 3005 self,
3006 3006 exclusivefiles=False,
3007 3007 sharedfiles=False,
3008 3008 revisionscount=False,
3009 3009 trackedsize=False,
3010 3010 storedsize=False,
3011 3011 ):
3012 3012 d = {}
3013 3013
3014 3014 if exclusivefiles:
3015 3015 d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
3016 3016 if not self._inline:
3017 3017 d[b'exclusivefiles'].append((self.opener, self.datafile))
3018 3018
3019 3019 if sharedfiles:
3020 3020 d[b'sharedfiles'] = []
3021 3021
3022 3022 if revisionscount:
3023 3023 d[b'revisionscount'] = len(self)
3024 3024
3025 3025 if trackedsize:
3026 3026 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3027 3027
3028 3028 if storedsize:
3029 3029 d[b'storedsize'] = sum(
3030 3030 self.opener.stat(path).st_size for path in self.files()
3031 3031 )
3032 3032
3033 3033 return d
@@ -1,456 +1,463 b''
1 1 # nodemap.py - nodemap related code and utilities
2 2 #
3 3 # Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 # Copyright 2019 George Racinet <georges.racinet@octobus.net>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import os
12 12 import re
13 13 import struct
14 14
15 15 from .. import (
16 16 error,
17 17 node as nodemod,
18 18 util,
19 19 )
20 20
21 21
22 22 class NodeMap(dict):
23 23 def __missing__(self, x):
24 24 raise error.RevlogError(b'unknown node: %s' % x)
25 25
26 26
27 27 def persisted_data(revlog):
28 28 """read the nodemap for a revlog from disk"""
29 29 if revlog.nodemap_file is None:
30 30 return None
31 31 pdata = revlog.opener.tryread(revlog.nodemap_file)
32 32 if not pdata:
33 33 return None
34 34 offset = 0
35 35 (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size])
36 36 if version != ONDISK_VERSION:
37 37 return None
38 38 offset += S_VERSION.size
39 (uid_size,) = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
39 headers = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
40 uid_size, tip_rev = headers
40 41 offset += S_HEADER.size
41 42 docket = NodeMapDocket(pdata[offset : offset + uid_size])
43 docket.tip_rev = tip_rev
42 44
43 45 filename = _rawdata_filepath(revlog, docket)
44 46 return docket, revlog.opener.tryread(filename)
45 47
46 48
47 49 def setup_persistent_nodemap(tr, revlog):
48 50 """Install whatever is needed transaction side to persist a nodemap on disk
49 51
50 52 (only actually persist the nodemap if this is relevant for this revlog)
51 53 """
52 54 if revlog._inline:
53 55 return # inlined revlogs are too small for this to be relevant
54 56 if revlog.nodemap_file is None:
55 57 return # we do not use persistent_nodemap on this revlog
56 58 callback_id = b"revlog-persistent-nodemap-%s" % revlog.nodemap_file
57 59 if tr.hasfinalize(callback_id):
58 60 return # no need to register again
59 61 tr.addfinalize(callback_id, lambda tr: _persist_nodemap(tr, revlog))
60 62
61 63
62 64 def _persist_nodemap(tr, revlog):
63 65 """Write nodemap data on disk for a given revlog
64 66 """
65 67 if getattr(revlog, 'filteredrevs', ()):
66 68 raise error.ProgrammingError(
67 69 "cannot persist nodemap of a filtered changelog"
68 70 )
69 71 if revlog.nodemap_file is None:
70 72 msg = "calling persist nodemap on a revlog without the feature enableb"
71 73 raise error.ProgrammingError(msg)
72 74
73 75 can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
74 76 ondisk_docket = revlog._nodemap_docket
75 77
76 78 # first attempt an incremental update of the data
77 79 if can_incremental and ondisk_docket is not None:
78 80 target_docket = revlog._nodemap_docket.copy()
79 81 data = revlog.index.nodemap_data_incremental()
80 82 datafile = _rawdata_filepath(revlog, target_docket)
81 83 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
82 84 # store vfs
83 85 with revlog.opener(datafile, b'a') as fd:
84 86 fd.write(data)
85 87 else:
86 88 # otherwise fallback to a full new export
87 89 target_docket = NodeMapDocket()
88 90 datafile = _rawdata_filepath(revlog, target_docket)
89 91 if util.safehasattr(revlog.index, "nodemap_data_all"):
90 92 data = revlog.index.nodemap_data_all()
91 93 else:
92 94 data = persistent_data(revlog.index)
93 95 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
94 96 # store vfs
95 97 with revlog.opener(datafile, b'w') as fd:
96 98 fd.write(data)
99 target_docket.tip_rev = revlog.tiprev()
97 100 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
98 101 # store vfs
99 102 with revlog.opener(revlog.nodemap_file, b'w', atomictemp=True) as fp:
100 103 fp.write(target_docket.serialize())
101 104 revlog._nodemap_docket = target_docket
102 105 # EXP-TODO: if the transaction abort, we should remove the new data and
103 106 # reinstall the old one.
104 107
105 108 # search for old index file in all cases, some older process might have
106 109 # left one behind.
107 110 olds = _other_rawdata_filepath(revlog, target_docket)
108 111 if olds:
109 112 realvfs = getattr(revlog, '_realopener', revlog.opener)
110 113
111 114 def cleanup(tr):
112 115 for oldfile in olds:
113 116 realvfs.tryunlink(oldfile)
114 117
115 118 callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file
116 119 tr.addpostclose(callback_id, cleanup)
117 120
118 121
119 122 ### Nodemap docket file
120 123 #
121 124 # The nodemap data are stored on disk using 2 files:
122 125 #
123 126 # * a raw data file containing a persistent nodemap
124 127 # (see `Nodemap Trie` section)
125 128 #
126 129 # * a small "docket" file containing metadata
127 130 #
128 131 # While the nodemap data can be multiple tens of megabytes, the "docket" is
129 132 # small, so it is easy to update it automatically or to duplicate its content
130 133 # during a transaction.
131 134 #
132 135 # Multiple raw data files can exist at the same time (the currently valid one
133 136 # and a new one being used by an in-progress transaction). To accommodate this,
134 137 # the filename hosting the raw data has a variable part. The exact filename is
135 138 # specified inside the "docket" file.
136 139 #
137 140 # The docket file contains information to find, qualify and validate the raw
138 141 # data. Its content is currently very light, but it will expand as the on disk
139 142 # nodemap gains the necessary features to be used in production.
140 143
141 144 # version 0 is experimental, no BC guarantee, do not use outside of tests.
142 145 ONDISK_VERSION = 0
143 146
144 147 S_VERSION = struct.Struct(">B")
145 S_HEADER = struct.Struct(">B")
148 S_HEADER = struct.Struct(">BQ")
146 149
147 150 ID_SIZE = 8
148 151
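With the new field, the docket on disk is: one version byte, the ">BQ" header carrying the uid length and the tip revision, then the uid itself. A stand-alone sketch of reading that layout (parse_docket_sketch is a hypothetical name; the real reader is persisted_data above):

import struct

S_VERSION = struct.Struct(">B")
S_HEADER = struct.Struct(">BQ")

def parse_docket_sketch(pdata):
    offset = 0
    (version,) = S_VERSION.unpack_from(pdata, offset)
    offset += S_VERSION.size
    uid_size, tip_rev = S_HEADER.unpack_from(pdata, offset)
    offset += S_HEADER.size
    uid = pdata[offset : offset + uid_size]
    return version, tip_rev, uid

# a 16-character uid plus the 10 header bytes gives the 26-byte docket
# observed in the test below
docket = S_VERSION.pack(0) + S_HEADER.pack(16, 5001) + b"0123456789abcdef"
assert parse_docket_sketch(docket) == (0, 5001, b"0123456789abcdef")
assert len(docket) == 26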
149 152
150 153 def _make_uid():
151 154 """return a new unique identifier.
152 155
153 156 The identifier is random and composed of ascii characters."""
154 157 return nodemod.hex(os.urandom(ID_SIZE))
155 158
156 159
157 160 class NodeMapDocket(object):
158 161 """metadata associated with persistent nodemap data
159 162
160 163 The persistent data may come from disk or be on their way to disk.
161 164 """
162 165
163 166 def __init__(self, uid=None):
164 167 if uid is None:
165 168 uid = _make_uid()
166 169 self.uid = uid
170 self.tip_rev = None
167 171
168 172 def copy(self):
169 return NodeMapDocket(uid=self.uid)
173 new = NodeMapDocket(uid=self.uid)
174 new.tip_rev = self.tip_rev
175 return new
170 176
171 177 def serialize(self):
172 178 """return serialized bytes for a docket using the passed uid"""
173 179 data = []
174 180 data.append(S_VERSION.pack(ONDISK_VERSION))
175 data.append(S_HEADER.pack(len(self.uid)))
181 headers = (len(self.uid), self.tip_rev)
182 data.append(S_HEADER.pack(*headers))
176 183 data.append(self.uid)
177 184 return b''.join(data)
178 185
179 186
180 187 def _rawdata_filepath(revlog, docket):
181 188 """The (vfs relative) nodemap's rawdata file for a given uid"""
182 189 prefix = revlog.nodemap_file[:-2]
183 190 return b"%s-%s.nd" % (prefix, docket.uid)
184 191
185 192
186 193 def _other_rawdata_filepath(revlog, docket):
187 194 prefix = revlog.nodemap_file[:-2]
188 195 pattern = re.compile(b"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
189 196 new_file_path = _rawdata_filepath(revlog, docket)
190 197 new_file_name = revlog.opener.basename(new_file_path)
191 198 dirpath = revlog.opener.dirname(new_file_path)
192 199 others = []
193 200 for f in revlog.opener.listdir(dirpath):
194 201 if pattern.match(f) and f != new_file_name:
195 202 others.append(f)
196 203 return others
197 204
198 205
199 206 ### Nodemap Trie
200 207 #
201 208 # This is a simple reference implementation to compute and persist a nodemap
202 209 # trie. This reference implementation is write only. The python version of this
203 210 # is not expected to be actually used, since it won't provide a performance
204 211 # improvement over the existing non-persistent C implementation.
205 212 #
206 213 # The nodemap is persisted as a Trie using 4-bit-address/16-entry blocks. Each
207 214 # revision can be addressed using its node's shortest prefix.
208 215 #
209 216 # The trie is stored as a sequence of blocks. Each block contains 16 entries
210 217 # (signed 32bit integer, big endian). Each entry can be one of the following:
211 218 #
212 219 # * value >= 0 -> index of sub-block
213 220 # * value == -1 -> no value
214 221 # * value < -1 -> a revision value: rev = -(value+2)
215 222 #
216 223 # The implementation focuses on simplicity, not on performance. A Rust
217 224 # implementation should provide an efficient version of the same binary
218 225 # persistence. This reference python implementation is never meant to be
219 226 # extensively used in production.
220 227
221 228
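A minimal sketch of the block encoding described above, reusing the same constants as the code below (illustrative values only):

import struct

S_BLOCK = struct.Struct(">" + ("l" * 16))
NO_ENTRY = -1
REV_OFFSET = 2

def _transform_rev(rev):
    # involution: maps a revision to a value < -1 and back
    return -(rev + REV_OFFSET)

# one block: revision 0 stored under nibble 0x3, a pointer to
# sub-block number 2 under nibble 0xa, every other entry empty
entries = [NO_ENTRY] * 16
entries[0x3] = _transform_rev(0)  # -> -2
entries[0xA] = 2                  # >= 0 means "index of a sub-block"
raw = S_BLOCK.pack(*entries)
assert len(raw) == 64             # 16 signed 32-bit big-endian integers
assert _transform_rev(entries[0x3]) == 0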
222 229 def persistent_data(index):
223 230 """return the persistent binary form for a nodemap for a given index
224 231 """
225 232 trie = _build_trie(index)
226 233 return _persist_trie(trie)
227 234
228 235
229 236 def update_persistent_data(index, root, max_idx, last_rev):
230 237 """return the incremental update for persistent nodemap from a given index
231 238 """
232 239 trie = _update_trie(index, root, last_rev)
233 240 return _persist_trie(trie, existing_idx=max_idx)
234 241
235 242
236 243 S_BLOCK = struct.Struct(">" + ("l" * 16))
237 244
238 245 NO_ENTRY = -1
239 246 # rev 0 need to be -2 because 0 is used by block, -1 is a special value.
240 247 REV_OFFSET = 2
241 248
242 249
243 250 def _transform_rev(rev):
244 251 """Return the number used to represent the rev in the tree.
245 252
246 253 (or retrieve a rev number from such representation)
247 254
248 255 Note that this is an involution, a function equal to its inverse (i.e.
249 256 which gives the identity when applied to itself).
250 257 """
251 258 return -(rev + REV_OFFSET)
252 259
253 260
254 261 def _to_int(hex_digit):
255 262 """turn an hexadecimal digit into a proper integer"""
256 263 return int(hex_digit, 16)
257 264
258 265
259 266 class Block(dict):
260 267 """represent a block of the Trie
261 268
262 269 contains up to 16 entry indexed from 0 to 15"""
263 270
264 271 def __init__(self):
265 272 super(Block, self).__init__()
266 273 # If this block exists on disk, here is its ID
267 274 self.ondisk_id = None
268 275
269 276 def __iter__(self):
270 277 return iter(self.get(i) for i in range(16))
271 278
272 279
273 280 def _build_trie(index):
274 281 """build a nodemap trie
275 282
276 283 The nodemap stores revision number for each unique prefix.
277 284
278 285 Each block is a dictionary with keys in `[0, 15]`. Values are either
279 286 another block or a revision number.
280 287 """
281 288 root = Block()
282 289 for rev in range(len(index)):
283 290 hex = nodemod.hex(index[rev][7])
284 291 _insert_into_block(index, 0, root, rev, hex)
285 292 return root
286 293
287 294
288 295 def _update_trie(index, root, last_rev):
289 296 """consume"""
290 297 for rev in range(last_rev + 1, len(index)):
291 298 hex = nodemod.hex(index[rev][7])
292 299 _insert_into_block(index, 0, root, rev, hex)
293 300 return root
294 301
295 302
296 303 def _insert_into_block(index, level, block, current_rev, current_hex):
297 304 """insert a new revision in a block
298 305
299 306 index: the index we are adding revision for
300 307 level: the depth of the current block in the trie
301 308 block: the block currently being considered
302 309 current_rev: the revision number we are adding
303 310 current_hex: the hexadecimal representation of the node of that revision
304 311 """
305 312 if block.ondisk_id is not None:
306 313 block.ondisk_id = None
307 314 hex_digit = _to_int(current_hex[level : level + 1])
308 315 entry = block.get(hex_digit)
309 316 if entry is None:
310 317 # no entry, simply store the revision number
311 318 block[hex_digit] = current_rev
312 319 elif isinstance(entry, dict):
313 320 # need to recurse to an underlying block
314 321 _insert_into_block(index, level + 1, entry, current_rev, current_hex)
315 322 else:
316 323 # collision with a previously unique prefix, inserting new
317 324 # vertices to fit both entries.
318 325 other_hex = nodemod.hex(index[entry][7])
319 326 other_rev = entry
320 327 new = Block()
321 328 block[hex_digit] = new
322 329 _insert_into_block(index, level + 1, new, other_rev, other_hex)
323 330 _insert_into_block(index, level + 1, new, current_rev, current_hex)
324 331
325 332
326 333 def _persist_trie(root, existing_idx=None):
327 334 """turn a nodemap trie into persistent binary data
328 335
329 336 See `_build_trie` for nodemap trie structure"""
330 337 block_map = {}
331 338 if existing_idx is not None:
332 339 base_idx = existing_idx + 1
333 340 else:
334 341 base_idx = 0
335 342 chunks = []
336 343 for tn in _walk_trie(root):
337 344 if tn.ondisk_id is not None:
338 345 block_map[id(tn)] = tn.ondisk_id
339 346 else:
340 347 block_map[id(tn)] = len(chunks) + base_idx
341 348 chunks.append(_persist_block(tn, block_map))
342 349 return b''.join(chunks)
343 350
344 351
345 352 def _walk_trie(block):
346 353 """yield all the block in a trie
347 354
348 355 Children blocks are always yielded before their parent block.
349 356 """
350 357 for (_, item) in sorted(block.items()):
351 358 if isinstance(item, dict):
352 359 for sub_block in _walk_trie(item):
353 360 yield sub_block
354 361 yield block
355 362
356 363
357 364 def _persist_block(block_node, block_map):
358 365 """produce persistent binary data for a single block
359 366
360 367 Children blocks are assumed to be already persisted and present in
361 368 block_map.
362 369 """
363 370 data = tuple(_to_value(v, block_map) for v in block_node)
364 371 return S_BLOCK.pack(*data)
365 372
366 373
367 374 def _to_value(item, block_map):
368 375 """persist any value as an integer"""
369 376 if item is None:
370 377 return NO_ENTRY
371 378 elif isinstance(item, dict):
372 379 return block_map[id(item)]
373 380 else:
374 381 return _transform_rev(item)
375 382
376 383
377 384 def parse_data(data):
378 385 """parse parse nodemap data into a nodemap Trie"""
379 386 if (len(data) % S_BLOCK.size) != 0:
380 387 msg = "nodemap data size is not a multiple of block size (%d): %d"
381 388 raise error.Abort(msg % (S_BLOCK.size, len(data)))
382 389 if not data:
383 390 return Block(), None
384 391 block_map = {}
385 392 new_blocks = []
386 393 for i in range(0, len(data), S_BLOCK.size):
387 394 block = Block()
388 395 block.ondisk_id = len(block_map)
389 396 block_map[block.ondisk_id] = block
390 397 block_data = data[i : i + S_BLOCK.size]
391 398 values = S_BLOCK.unpack(block_data)
392 399 new_blocks.append((block, values))
393 400 for b, values in new_blocks:
394 401 for idx, v in enumerate(values):
395 402 if v == NO_ENTRY:
396 403 continue
397 404 elif v >= 0:
398 405 b[idx] = block_map[v]
399 406 else:
400 407 b[idx] = _transform_rev(v)
401 408 return block, i // S_BLOCK.size
402 409
403 410
404 411 # debug utility
405 412
406 413
407 414 def check_data(ui, index, data):
408 415 """verify that the provided nodemap data are valid for the given idex"""
409 416 ret = 0
410 417 ui.status((b"revision in index: %d\n") % len(index))
411 418 root, __ = parse_data(data)
412 419 all_revs = set(_all_revisions(root))
413 420 ui.status((b"revision in nodemap: %d\n") % len(all_revs))
414 421 for r in range(len(index)):
415 422 if r not in all_revs:
416 423 msg = b" revision missing from nodemap: %d\n" % r
417 424 ui.write_err(msg)
418 425 ret = 1
419 426 else:
420 427 all_revs.remove(r)
421 428 nm_rev = _find_node(root, nodemod.hex(index[r][7]))
422 429 if nm_rev is None:
423 430 msg = b" revision node does not match any entries: %d\n" % r
424 431 ui.write_err(msg)
425 432 ret = 1
426 433 elif nm_rev != r:
427 434 msg = (
428 435 b" revision node does not match the expected revision: "
429 436 b"%d != %d\n" % (r, nm_rev)
430 437 )
431 438 ui.write_err(msg)
432 439 ret = 1
433 440
434 441 if all_revs:
435 442 for r in sorted(all_revs):
436 443 msg = b" extra revision in nodemap: %d\n" % r
437 444 ui.write_err(msg)
438 445 ret = 1
439 446 return ret
440 447
441 448
442 449 def _all_revisions(root):
443 450 """return all revisions stored in a Trie"""
444 451 for block in _walk_trie(root):
445 452 for v in block:
446 453 if v is None or isinstance(v, Block):
447 454 continue
448 455 yield v
449 456
450 457
451 458 def _find_node(block, node):
452 459 """find the revision associated with a given node"""
453 460 entry = block.get(_to_int(node[0:1]))
454 461 if isinstance(entry, dict):
455 462 return _find_node(entry, node[1:])
456 463 return entry
@@ -1,71 +1,73 b''
1 1 ===================================
2 2 Test the persistent on-disk nodemap
3 3 ===================================
4 4
5 5
6 6 $ hg init test-repo
7 7 $ cd test-repo
8 8 $ cat << EOF >> .hg/hgrc
9 9 > [experimental]
10 10 > exp-persistent-nodemap=yes
11 11 > [devel]
12 12 > persistent-nodemap=yes
13 13 > EOF
14 14 $ hg debugbuilddag .+5000
15 15 $ hg debugnodemap --metadata
16 16 uid: ???????????????? (glob)
17 tip-rev: 5000
17 18 $ f --size .hg/store/00changelog.n
18 .hg/store/00changelog.n: size=18
19 .hg/store/00changelog.n: size=26
19 20 $ f --sha256 .hg/store/00changelog-*.nd
20 21 .hg/store/00changelog-????????????????.nd: sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7 (glob)
21 22 $ hg debugnodemap --dump-new | f --sha256 --size
22 23 size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
23 24 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
24 25 size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
25 26 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
26 27 0010: ff ff ff ff ff ff ff ff ff ff fa c2 ff ff ff ff |................|
27 28 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
28 29 0030: ff ff ff ff ff ff ed b3 ff ff ff ff ff ff ff ff |................|
29 30 0040: ff ff ff ff ff ff ee 34 00 00 00 00 ff ff ff ff |.......4........|
30 31 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
31 32 0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
32 33 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
33 34 0080: ff ff ff ff ff ff f8 50 ff ff ff ff ff ff ff ff |.......P........|
34 35 0090: ff ff ff ff ff ff ff ff ff ff ec c7 ff ff ff ff |................|
35 36 00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
36 37 00b0: ff ff ff ff ff ff fa be ff ff f2 fc ff ff ff ff |................|
37 38 00c0: ff ff ff ff ff ff ef ea ff ff ff ff ff ff f9 17 |................|
38 39 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
39 40 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
40 41 00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
41 42 $ hg debugnodemap --check
42 43 revision in index: 5001
43 44 revision in nodemap: 5001
44 45
45 46 add a new commit
46 47
47 48 $ hg up
48 49 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 50 $ echo foo > foo
50 51 $ hg add foo
51 52 $ hg ci -m 'foo'
52 53 $ hg debugnodemap --metadata
53 54 uid: ???????????????? (glob)
55 tip-rev: 5001
54 56 $ f --size .hg/store/00changelog.n
55 .hg/store/00changelog.n: size=18
57 .hg/store/00changelog.n: size=26
56 58
57 59 (The pure code use the debug code that perform incremental update, the C code reencode from scratch)
58 60
59 61 #if pure
60 62 $ f --sha256 .hg/store/00changelog-*.nd --size
61 63 .hg/store/00changelog-????????????????.nd: size=123072, sha256=136472751566c8198ff09e306a7d2f9bd18bd32298d614752b73da4d6df23340 (glob)
62 64
63 65 #else
64 66 $ f --sha256 .hg/store/00changelog-*.nd --size
65 67 .hg/store/00changelog-????????????????.nd: size=122880, sha256=bfafebd751c4f6d116a76a37a1dee2a251747affe7efbcc4f4842ccc746d4db9 (glob)
66 68
67 69 #endif
68 70
69 71 $ hg debugnodemap --check
70 72 revision in index: 5002
71 73 revision in nodemap: 5002