obsutil: move the 'marker' class to the new modules...
marmoute
r33149:4e30168d default
@@ -1,2245 +1,2245 @@
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import difflib
11 11 import errno
12 12 import operator
13 13 import os
14 14 import random
15 15 import socket
16 16 import string
17 17 import sys
18 18 import tempfile
19 19 import time
20 20
21 21 from .i18n import _
22 22 from .node import (
23 23 bin,
24 24 hex,
25 25 nullhex,
26 26 nullid,
27 27 nullrev,
28 28 short,
29 29 )
30 30 from . import (
31 31 bundle2,
32 32 changegroup,
33 33 cmdutil,
34 34 color,
35 35 context,
36 36 dagparser,
37 37 dagutil,
38 38 encoding,
39 39 error,
40 40 exchange,
41 41 extensions,
42 42 filemerge,
43 43 fileset,
44 44 formatter,
45 45 hg,
46 46 localrepo,
47 47 lock as lockmod,
48 48 merge as mergemod,
49 49 obsolete,
50 50 obsutil,
51 51 phases,
52 52 policy,
53 53 pvec,
54 54 pycompat,
55 55 registrar,
56 56 repair,
57 57 revlog,
58 58 revset,
59 59 revsetlang,
60 60 scmutil,
61 61 setdiscovery,
62 62 simplemerge,
63 63 smartset,
64 64 sslutil,
65 65 streamclone,
66 66 templater,
67 67 treediscovery,
68 68 upgrade,
69 69 util,
70 70 vfs as vfsmod,
71 71 )
72 72
73 73 release = lockmod.release
74 74
75 75 command = registrar.command()
76 76
77 77 @command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
78 78 def debugancestor(ui, repo, *args):
79 79 """find the ancestor revision of two revisions in a given index"""
80 80 if len(args) == 3:
81 81 index, rev1, rev2 = args
82 82 r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
83 83 lookup = r.lookup
84 84 elif len(args) == 2:
85 85 if not repo:
86 86 raise error.Abort(_('there is no Mercurial repository here '
87 87 '(.hg not found)'))
88 88 rev1, rev2 = args
89 89 r = repo.changelog
90 90 lookup = repo.lookup
91 91 else:
92 92 raise error.Abort(_('either two or three arguments required'))
93 93 a = r.ancestor(lookup(rev1), lookup(rev2))
94 94 ui.write('%d:%s\n' % (r.rev(a), hex(a)))
95 95
96 96 @command('debugapplystreamclonebundle', [], 'FILE')
97 97 def debugapplystreamclonebundle(ui, repo, fname):
98 98 """apply a stream clone bundle file"""
99 99 f = hg.openpath(ui, fname)
100 100 gen = exchange.readbundle(ui, f, fname)
101 101 gen.apply(repo)
102 102
103 103 @command('debugbuilddag',
104 104 [('m', 'mergeable-file', None, _('add single file mergeable changes')),
105 105 ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
106 106 ('n', 'new-file', None, _('add new file at each rev'))],
107 107 _('[OPTION]... [TEXT]'))
108 108 def debugbuilddag(ui, repo, text=None,
109 109 mergeable_file=False,
110 110 overwritten_file=False,
111 111 new_file=False):
112 112 """builds a repo with a given DAG from scratch in the current empty repo
113 113
114 114 The description of the DAG is read from stdin if not given on the
115 115 command line.
116 116
117 117 Elements:
118 118
119 119 - "+n" is a linear run of n nodes based on the current default parent
120 120 - "." is a single node based on the current default parent
121 121 - "$" resets the default parent to null (implied at the start);
122 122 otherwise the default parent is always the last node created
123 123 - "<p" sets the default parent to the backref p
124 124 - "*p" is a fork at parent p, which is a backref
125 125 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
126 126 - "/p2" is a merge of the preceding node and p2
127 127 - ":tag" defines a local tag for the preceding node
128 128 - "@branch" sets the named branch for subsequent nodes
129 129 - "#...\\n" is a comment up to the end of the line
130 130
131 131 Whitespace between the above elements is ignored.
132 132
133 133 A backref is either
134 134
135 135 - a number n, which references the node curr-n, where curr is the current
136 136 node, or
137 137 - the name of a local tag you placed earlier using ":tag", or
138 138 - empty to denote the default parent.
139 139
140 140 All string-valued elements are either strictly alphanumeric, or must
141 141 be enclosed in double quotes ("..."), with "\\" as escape character.
142 142 """
143 143
144 144 if text is None:
145 145 ui.status(_("reading DAG from stdin\n"))
146 146 text = ui.fin.read()
147 147
148 148 cl = repo.changelog
149 149 if len(cl) > 0:
150 150 raise error.Abort(_('repository is not empty'))
151 151
152 152 # determine number of revs in DAG
153 153 total = 0
154 154 for type, data in dagparser.parsedag(text):
155 155 if type == 'n':
156 156 total += 1
157 157
158 158 if mergeable_file:
159 159 linesperrev = 2
160 160 # make a file with k lines per rev
161 161 initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
162 162 initialmergedlines.append("")
163 163
164 164 tags = []
165 165
166 166 wlock = lock = tr = None
167 167 try:
168 168 wlock = repo.wlock()
169 169 lock = repo.lock()
170 170 tr = repo.transaction("builddag")
171 171
172 172 at = -1
173 173 atbranch = 'default'
174 174 nodeids = []
175 175 id = 0
176 176 ui.progress(_('building'), id, unit=_('revisions'), total=total)
177 177 for type, data in dagparser.parsedag(text):
178 178 if type == 'n':
179 179 ui.note(('node %s\n' % str(data)))
180 180 id, ps = data
181 181
182 182 files = []
183 183 fctxs = {}
184 184
185 185 p2 = None
186 186 if mergeable_file:
187 187 fn = "mf"
188 188 p1 = repo[ps[0]]
189 189 if len(ps) > 1:
190 190 p2 = repo[ps[1]]
191 191 pa = p1.ancestor(p2)
192 192 base, local, other = [x[fn].data() for x in (pa, p1,
193 193 p2)]
194 194 m3 = simplemerge.Merge3Text(base, local, other)
195 195 ml = [l.strip() for l in m3.merge_lines()]
196 196 ml.append("")
197 197 elif at > 0:
198 198 ml = p1[fn].data().split("\n")
199 199 else:
200 200 ml = initialmergedlines
201 201 ml[id * linesperrev] += " r%i" % id
202 202 mergedtext = "\n".join(ml)
203 203 files.append(fn)
204 204 fctxs[fn] = context.memfilectx(repo, fn, mergedtext)
205 205
206 206 if overwritten_file:
207 207 fn = "of"
208 208 files.append(fn)
209 209 fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
210 210
211 211 if new_file:
212 212 fn = "nf%i" % id
213 213 files.append(fn)
214 214 fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
215 215 if len(ps) > 1:
216 216 if not p2:
217 217 p2 = repo[ps[1]]
218 218 for fn in p2:
219 219 if fn.startswith("nf"):
220 220 files.append(fn)
221 221 fctxs[fn] = p2[fn]
222 222
223 223 def fctxfn(repo, cx, path):
224 224 return fctxs.get(path)
225 225
226 226 if len(ps) == 0 or ps[0] < 0:
227 227 pars = [None, None]
228 228 elif len(ps) == 1:
229 229 pars = [nodeids[ps[0]], None]
230 230 else:
231 231 pars = [nodeids[p] for p in ps]
232 232 cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
233 233 date=(id, 0),
234 234 user="debugbuilddag",
235 235 extra={'branch': atbranch})
236 236 nodeid = repo.commitctx(cx)
237 237 nodeids.append(nodeid)
238 238 at = id
239 239 elif type == 'l':
240 240 id, name = data
241 241 ui.note(('tag %s\n' % name))
242 242 tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
243 243 elif type == 'a':
244 244 ui.note(('branch %s\n' % data))
245 245 atbranch = data
246 246 ui.progress(_('building'), id, unit=_('revisions'), total=total)
247 247 tr.close()
248 248
249 249 if tags:
250 250 repo.vfs.write("localtags", "".join(tags))
251 251 finally:
252 252 ui.progress(_('building'), None)
253 253 release(tr, lock, wlock)
254 254
255 255 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
256 256 indent_string = ' ' * indent
257 257 if all:
258 258 ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
259 259 % indent_string)
260 260
261 261 def showchunks(named):
262 262 ui.write("\n%s%s\n" % (indent_string, named))
263 263 chain = None
264 264 for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
265 265 node = chunkdata['node']
266 266 p1 = chunkdata['p1']
267 267 p2 = chunkdata['p2']
268 268 cs = chunkdata['cs']
269 269 deltabase = chunkdata['deltabase']
270 270 delta = chunkdata['delta']
271 271 ui.write("%s%s %s %s %s %s %s\n" %
272 272 (indent_string, hex(node), hex(p1), hex(p2),
273 273 hex(cs), hex(deltabase), len(delta)))
274 274 chain = node
275 275
276 276 chunkdata = gen.changelogheader()
277 277 showchunks("changelog")
278 278 chunkdata = gen.manifestheader()
279 279 showchunks("manifest")
280 280 for chunkdata in iter(gen.filelogheader, {}):
281 281 fname = chunkdata['filename']
282 282 showchunks(fname)
283 283 else:
284 284 if isinstance(gen, bundle2.unbundle20):
285 285 raise error.Abort(_('use debugbundle2 for this file'))
286 286 chunkdata = gen.changelogheader()
287 287 chain = None
288 288 for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
289 289 node = chunkdata['node']
290 290 ui.write("%s%s\n" % (indent_string, hex(node)))
291 291 chain = node
292 292
293 293 def _debugobsmarkers(ui, part, indent=0, **opts):
294 294 """display version and markers contained in 'data'"""
295 295 opts = pycompat.byteskwargs(opts)
296 296 data = part.read()
297 297 indent_string = ' ' * indent
298 298 try:
299 299 version, markers = obsolete._readmarkers(data)
300 300 except error.UnknownVersion as exc:
301 301 msg = "%sunsupported version: %s (%d bytes)\n"
302 302 msg %= indent_string, exc.version, len(data)
303 303 ui.write(msg)
304 304 else:
305 305 msg = "%sversion: %s (%d bytes)\n"
306 306 msg %= indent_string, version, len(data)
307 307 ui.write(msg)
308 308 fm = ui.formatter('debugobsolete', opts)
309 309 for rawmarker in sorted(markers):
310 m = obsolete.marker(None, rawmarker)
310 m = obsutil.marker(None, rawmarker)
311 311 fm.startitem()
312 312 fm.plain(indent_string)
313 313 cmdutil.showmarker(fm, m)
314 314 fm.end()
315 315
316 316 def _debugphaseheads(ui, data, indent=0):
317 317 """display version and markers contained in 'data'"""
318 318 indent_string = ' ' * indent
319 319 headsbyphase = bundle2._readphaseheads(data)
320 320 for phase in phases.allphases:
321 321 for head in headsbyphase[phase]:
322 322 ui.write(indent_string)
323 323 ui.write('%s %s\n' % (hex(head), phases.phasenames[phase]))
324 324
325 325 def _debugbundle2(ui, gen, all=None, **opts):
326 326 """lists the contents of a bundle2"""
327 327 if not isinstance(gen, bundle2.unbundle20):
328 328 raise error.Abort(_('not a bundle2 file'))
329 329 ui.write(('Stream params: %s\n' % repr(gen.params)))
330 330 parttypes = opts.get(r'part_type', [])
331 331 for part in gen.iterparts():
332 332 if parttypes and part.type not in parttypes:
333 333 continue
334 334 ui.write('%s -- %r\n' % (part.type, repr(part.params)))
335 335 if part.type == 'changegroup':
336 336 version = part.params.get('version', '01')
337 337 cg = changegroup.getunbundler(version, part, 'UN')
338 338 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
339 339 if part.type == 'obsmarkers':
340 340 _debugobsmarkers(ui, part, indent=4, **opts)
341 341 if part.type == 'phase-heads':
342 342 _debugphaseheads(ui, part, indent=4)
343 343
344 344 @command('debugbundle',
345 345 [('a', 'all', None, _('show all details')),
346 346 ('', 'part-type', [], _('show only the named part type')),
347 347 ('', 'spec', None, _('print the bundlespec of the bundle'))],
348 348 _('FILE'),
349 349 norepo=True)
350 350 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
351 351 """lists the contents of a bundle"""
352 352 with hg.openpath(ui, bundlepath) as f:
353 353 if spec:
354 354 spec = exchange.getbundlespec(ui, f)
355 355 ui.write('%s\n' % spec)
356 356 return
357 357
358 358 gen = exchange.readbundle(ui, f, bundlepath)
359 359 if isinstance(gen, bundle2.unbundle20):
360 360 return _debugbundle2(ui, gen, all=all, **opts)
361 361 _debugchangegroup(ui, gen, all=all, **opts)
362 362
363 363 @command('debugcheckstate', [], '')
364 364 def debugcheckstate(ui, repo):
365 365 """validate the correctness of the current dirstate"""
366 366 parent1, parent2 = repo.dirstate.parents()
367 367 m1 = repo[parent1].manifest()
368 368 m2 = repo[parent2].manifest()
369 369 errors = 0
370 370 for f in repo.dirstate:
371 371 state = repo.dirstate[f]
372 372 if state in "nr" and f not in m1:
373 373 ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
374 374 errors += 1
375 375 if state in "a" and f in m1:
376 376 ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
377 377 errors += 1
378 378 if state in "m" and f not in m1 and f not in m2:
379 379 ui.warn(_("%s in state %s, but not in either manifest\n") %
380 380 (f, state))
381 381 errors += 1
382 382 for f in m1:
383 383 state = repo.dirstate[f]
384 384 if state not in "nrm":
385 385 ui.warn(_("%s in manifest1, but listed as state %s\n") % (f, state))
386 386 errors += 1
387 387 if errors:
388 388 errstr = _(".hg/dirstate inconsistent with current parent's manifest")
389 389 raise error.Abort(errstr)
390 390
391 391 @command('debugcolor',
392 392 [('', 'style', None, _('show all configured styles'))],
393 393 'hg debugcolor')
394 394 def debugcolor(ui, repo, **opts):
395 395 """show available color, effects or style"""
396 396 ui.write(('color mode: %s\n') % ui._colormode)
397 397 if opts.get(r'style'):
398 398 return _debugdisplaystyle(ui)
399 399 else:
400 400 return _debugdisplaycolor(ui)
401 401
402 402 def _debugdisplaycolor(ui):
403 403 ui = ui.copy()
404 404 ui._styles.clear()
405 405 for effect in color._activeeffects(ui).keys():
406 406 ui._styles[effect] = effect
407 407 if ui._terminfoparams:
408 408 for k, v in ui.configitems('color'):
409 409 if k.startswith('color.'):
410 410 ui._styles[k] = k[6:]
411 411 elif k.startswith('terminfo.'):
412 412 ui._styles[k] = k[9:]
413 413 ui.write(_('available colors:\n'))
414 414 # sort labels with '_' after the others to group the '_background' entries.
415 415 items = sorted(ui._styles.items(),
416 416 key=lambda i: ('_' in i[0], i[0], i[1]))
417 417 for colorname, label in items:
418 418 ui.write(('%s\n') % colorname, label=label)
419 419
420 420 def _debugdisplaystyle(ui):
421 421 ui.write(_('available style:\n'))
422 422 width = max(len(s) for s in ui._styles)
423 423 for label, effects in sorted(ui._styles.items()):
424 424 ui.write('%s' % label, label=label)
425 425 if effects:
427 427 ui.write(': ')
428 428 ui.write(' ' * (max(0, width - len(label))))
429 429 ui.write(', '.join(ui.label(e, e) for e in effects.split()))
430 430 ui.write('\n')
431 431
432 432 @command('debugcreatestreamclonebundle', [], 'FILE')
433 433 def debugcreatestreamclonebundle(ui, repo, fname):
434 434 """create a stream clone bundle file
435 435
436 436 Stream bundles are special bundles that are essentially archives of
437 437 revlog files. They are commonly used for cloning very quickly.
438 438 """
439 439 # TODO we may want to turn this into an abort when this functionality
440 440 # is moved into `hg bundle`.
441 441 if phases.hassecret(repo):
442 442 ui.warn(_('(warning: stream clone bundle will contain secret '
443 443 'revisions)\n'))
444 444
445 445 requirements, gen = streamclone.generatebundlev1(repo)
446 446 changegroup.writechunks(ui, gen, fname)
447 447
448 448 ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
449 449
450 450 @command('debugdag',
451 451 [('t', 'tags', None, _('use tags as labels')),
452 452 ('b', 'branches', None, _('annotate with branch names')),
453 453 ('', 'dots', None, _('use dots for runs')),
454 454 ('s', 'spaces', None, _('separate elements by spaces'))],
455 455 _('[OPTION]... [FILE [REV]...]'),
456 456 optionalrepo=True)
457 457 def debugdag(ui, repo, file_=None, *revs, **opts):
458 458 """format the changelog or an index DAG as a concise textual description
459 459
460 460 If you pass a revlog index, the revlog's DAG is emitted. If you list
461 461 revision numbers, they get labeled in the output as rN.
462 462
463 463 Otherwise, the changelog DAG of the current repo is emitted.
464 464 """
465 465 spaces = opts.get(r'spaces')
466 466 dots = opts.get(r'dots')
467 467 if file_:
468 468 rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
469 469 file_)
470 470 revs = set((int(r) for r in revs))
471 471 def events():
472 472 for r in rlog:
473 473 yield 'n', (r, list(p for p in rlog.parentrevs(r)
474 474 if p != -1))
475 475 if r in revs:
476 476 yield 'l', (r, "r%i" % r)
477 477 elif repo:
478 478 cl = repo.changelog
479 479 tags = opts.get(r'tags')
480 480 branches = opts.get(r'branches')
481 481 if tags:
482 482 labels = {}
483 483 for l, n in repo.tags().items():
484 484 labels.setdefault(cl.rev(n), []).append(l)
485 485 def events():
486 486 b = "default"
487 487 for r in cl:
488 488 if branches:
489 489 newb = cl.read(cl.node(r))[5]['branch']
490 490 if newb != b:
491 491 yield 'a', newb
492 492 b = newb
493 493 yield 'n', (r, list(p for p in cl.parentrevs(r)
494 494 if p != -1))
495 495 if tags:
496 496 ls = labels.get(r)
497 497 if ls:
498 498 for l in ls:
499 499 yield 'l', (r, l)
500 500 else:
501 501 raise error.Abort(_('need repo for changelog dag'))
502 502
503 503 for line in dagparser.dagtextlines(events(),
504 504 addspaces=spaces,
505 505 wraplabels=True,
506 506 wrapannotations=True,
507 507 wrapnonlinear=dots,
508 508 usedots=dots,
509 509 maxlinewidth=70):
510 510 ui.write(line)
511 511 ui.write("\n")
512 512
513 513 @command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
514 514 def debugdata(ui, repo, file_, rev=None, **opts):
515 515 """dump the contents of a data file revision"""
516 516 opts = pycompat.byteskwargs(opts)
517 517 if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
518 518 if rev is not None:
519 519 raise error.CommandError('debugdata', _('invalid arguments'))
520 520 file_, rev = None, file_
521 521 elif rev is None:
522 522 raise error.CommandError('debugdata', _('invalid arguments'))
523 523 r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
524 524 try:
525 525 ui.write(r.revision(r.lookup(rev), raw=True))
526 526 except KeyError:
527 527 raise error.Abort(_('invalid revision identifier %s') % rev)
528 528
529 529 @command('debugdate',
530 530 [('e', 'extended', None, _('try extended date formats'))],
531 531 _('[-e] DATE [RANGE]'),
532 532 norepo=True, optionalrepo=True)
533 533 def debugdate(ui, date, range=None, **opts):
534 534 """parse and display a date"""
535 535 if opts[r"extended"]:
536 536 d = util.parsedate(date, util.extendeddateformats)
537 537 else:
538 538 d = util.parsedate(date)
539 539 ui.write(("internal: %s %s\n") % d)
540 540 ui.write(("standard: %s\n") % util.datestr(d))
541 541 if range:
542 542 m = util.matchdate(range)
543 543 ui.write(("match: %s\n") % m(d[0]))
544 544
545 545 @command('debugdeltachain',
546 546 cmdutil.debugrevlogopts + cmdutil.formatteropts,
547 547 _('-c|-m|FILE'),
548 548 optionalrepo=True)
549 549 def debugdeltachain(ui, repo, file_=None, **opts):
550 550 """dump information about delta chains in a revlog
551 551
552 552 Output can be templatized. Available template keywords are:
553 553
554 554 :``rev``: revision number
555 555 :``chainid``: delta chain identifier (numbered by unique base)
556 556 :``chainlen``: delta chain length to this revision
557 557 :``prevrev``: previous revision in delta chain
558 558 :``deltatype``: role of delta / how it was computed
559 559 :``compsize``: compressed size of revision
560 560 :``uncompsize``: uncompressed size of revision
561 561 :``chainsize``: total size of compressed revisions in chain
562 562 :``chainratio``: total chain size divided by uncompressed revision size
563 563 (new delta chains typically start at ratio 2.00)
564 564 :``lindist``: linear distance from base revision in delta chain to end
565 565 of this revision
566 566 :``extradist``: total size of revisions not part of this delta chain from
567 567 base of delta chain to end of this revision; a measurement
568 568 of how much extra data we need to read/seek across to read
569 569 the delta chain for this revision
570 570 :``extraratio``: extradist divided by chainsize; another representation of
571 571 how much unrelated data is needed to load this delta chain
572 572 """
573 573 opts = pycompat.byteskwargs(opts)
574 574 r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
575 575 index = r.index
576 576 generaldelta = r.version & revlog.FLAG_GENERALDELTA
577 577
578 578 def revinfo(rev):
579 579 e = index[rev]
580 580 compsize = e[1]
581 581 uncompsize = e[2]
582 582 chainsize = 0
583 583
584 584 if generaldelta:
585 585 if e[3] == e[5]:
586 586 deltatype = 'p1'
587 587 elif e[3] == e[6]:
588 588 deltatype = 'p2'
589 589 elif e[3] == rev - 1:
590 590 deltatype = 'prev'
591 591 elif e[3] == rev:
592 592 deltatype = 'base'
593 593 else:
594 594 deltatype = 'other'
595 595 else:
596 596 if e[3] == rev:
597 597 deltatype = 'base'
598 598 else:
599 599 deltatype = 'prev'
600 600
601 601 chain = r._deltachain(rev)[0]
602 602 for iterrev in chain:
603 603 e = index[iterrev]
604 604 chainsize += e[1]
605 605
606 606 return compsize, uncompsize, deltatype, chain, chainsize
607 607
608 608 fm = ui.formatter('debugdeltachain', opts)
609 609
610 610 fm.plain(' rev chain# chainlen prev delta '
611 611 'size rawsize chainsize ratio lindist extradist '
612 612 'extraratio\n')
613 613
614 614 chainbases = {}
615 615 for rev in r:
616 616 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
617 617 chainbase = chain[0]
618 618 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
619 619 basestart = r.start(chainbase)
620 620 revstart = r.start(rev)
621 621 lineardist = revstart + comp - basestart
622 622 extradist = lineardist - chainsize
623 623 try:
624 624 prevrev = chain[-2]
625 625 except IndexError:
626 626 prevrev = -1
627 627
628 628 chainratio = float(chainsize) / float(uncomp)
629 629 extraratio = float(extradist) / float(chainsize)
630 630
631 631 fm.startitem()
632 632 fm.write('rev chainid chainlen prevrev deltatype compsize '
633 633 'uncompsize chainsize chainratio lindist extradist '
634 634 'extraratio',
635 635 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
636 636 rev, chainid, len(chain), prevrev, deltatype, comp,
637 637 uncomp, chainsize, chainratio, lineardist, extradist,
638 638 extraratio,
639 639 rev=rev, chainid=chainid, chainlen=len(chain),
640 640 prevrev=prevrev, deltatype=deltatype, compsize=comp,
641 641 uncompsize=uncomp, chainsize=chainsize,
642 642 chainratio=chainratio, lindist=lineardist,
643 643 extradist=extradist, extraratio=extraratio)
644 644
645 645 fm.end()
646 646
647 647 @command('debugdirstate|debugstate',
648 648 [('', 'nodates', None, _('do not display the saved mtime')),
649 649 ('', 'datesort', None, _('sort by saved mtime'))],
650 650 _('[OPTION]...'))
651 651 def debugstate(ui, repo, **opts):
652 652 """show the contents of the current dirstate"""
653 653
654 654 nodates = opts.get(r'nodates')
655 655 datesort = opts.get(r'datesort')
656 656
657 657 timestr = ""
658 658 if datesort:
659 659 keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
660 660 else:
661 661 keyfunc = None # sort by filename
662 662 for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
663 663 if ent[3] == -1:
664 664 timestr = 'unset '
665 665 elif nodates:
666 666 timestr = 'set '
667 667 else:
668 668 timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
669 669 time.localtime(ent[3]))
670 670 if ent[1] & 0o20000:
671 671 mode = 'lnk'
672 672 else:
673 673 mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
674 674 ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
675 675 for f in repo.dirstate.copies():
676 676 ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
677 677
678 678 @command('debugdiscovery',
679 679 [('', 'old', None, _('use old-style discovery')),
680 680 ('', 'nonheads', None,
681 681 _('use old-style discovery with non-heads included')),
682 682 ] + cmdutil.remoteopts,
683 683 _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
684 684 def debugdiscovery(ui, repo, remoteurl="default", **opts):
685 685 """runs the changeset discovery protocol in isolation"""
686 686 opts = pycompat.byteskwargs(opts)
687 687 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
688 688 opts.get('branch'))
689 689 remote = hg.peer(repo, opts, remoteurl)
690 690 ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))
691 691
692 692 # make sure tests are repeatable
693 693 random.seed(12323)
694 694
695 695 def doit(localheads, remoteheads, remote=remote):
696 696 if opts.get('old'):
697 697 if localheads:
698 698 raise error.Abort('cannot use localheads with old style '
699 699 'discovery')
700 700 if not util.safehasattr(remote, 'branches'):
701 701 # enable in-client legacy support
702 702 remote = localrepo.locallegacypeer(remote.local())
703 703 common, _in, hds = treediscovery.findcommonincoming(repo, remote,
704 704 force=True)
705 705 common = set(common)
706 706 if not opts.get('nonheads'):
707 707 ui.write(("unpruned common: %s\n") %
708 708 " ".join(sorted(short(n) for n in common)))
709 709 dag = dagutil.revlogdag(repo.changelog)
710 710 all = dag.ancestorset(dag.internalizeall(common))
711 711 common = dag.externalizeall(dag.headsetofconnecteds(all))
712 712 else:
713 713 common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
714 714 common = set(common)
715 715 rheads = set(hds)
716 716 lheads = set(repo.heads())
717 717 ui.write(("common heads: %s\n") %
718 718 " ".join(sorted(short(n) for n in common)))
719 719 if lheads <= common:
720 720 ui.write(("local is subset\n"))
721 721 elif rheads <= common:
722 722 ui.write(("remote is subset\n"))
723 723
724 724 serverlogs = opts.get('serverlog')
725 725 if serverlogs:
726 726 for filename in serverlogs:
727 727 with open(filename, 'r') as logfile:
728 728 line = logfile.readline()
729 729 while line:
730 730 parts = line.strip().split(';')
731 731 op = parts[1]
732 732 if op == 'cg':
733 733 pass
734 734 elif op == 'cgss':
735 735 doit(parts[2].split(' '), parts[3].split(' '))
736 736 elif op == 'unb':
737 737 doit(parts[3].split(' '), parts[2].split(' '))
738 738 line = logfile.readline()
739 739 else:
740 740 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
741 741 opts.get('remote_head'))
742 742 localrevs = opts.get('local_head')
743 743 doit(localrevs, remoterevs)
744 744
745 745 @command('debugextensions', cmdutil.formatteropts, [], norepo=True)
746 746 def debugextensions(ui, **opts):
747 747 '''show information about active extensions'''
748 748 opts = pycompat.byteskwargs(opts)
749 749 exts = extensions.extensions(ui)
750 750 hgver = util.version()
751 751 fm = ui.formatter('debugextensions', opts)
752 752 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
753 753 isinternal = extensions.ismoduleinternal(extmod)
754 754 extsource = pycompat.fsencode(extmod.__file__)
755 755 if isinternal:
756 756 exttestedwith = [] # never expose magic string to users
757 757 else:
758 758 exttestedwith = getattr(extmod, 'testedwith', '').split()
759 759 extbuglink = getattr(extmod, 'buglink', None)
760 760
761 761 fm.startitem()
762 762
763 763 if ui.quiet or ui.verbose:
764 764 fm.write('name', '%s\n', extname)
765 765 else:
766 766 fm.write('name', '%s', extname)
767 767 if isinternal or hgver in exttestedwith:
768 768 fm.plain('\n')
769 769 elif not exttestedwith:
770 770 fm.plain(_(' (untested!)\n'))
771 771 else:
772 772 lasttestedversion = exttestedwith[-1]
773 773 fm.plain(' (%s!)\n' % lasttestedversion)
774 774
775 775 fm.condwrite(ui.verbose and extsource, 'source',
776 776 _(' location: %s\n'), extsource or "")
777 777
778 778 if ui.verbose:
779 779 fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal])
780 780 fm.data(bundled=isinternal)
781 781
782 782 fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
783 783 _(' tested with: %s\n'),
784 784 fm.formatlist(exttestedwith, name='ver'))
785 785
786 786 fm.condwrite(ui.verbose and extbuglink, 'buglink',
787 787 _(' bug reporting: %s\n'), extbuglink or "")
788 788
789 789 fm.end()
790 790
791 791 @command('debugfileset',
792 792 [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
793 793 _('[-r REV] FILESPEC'))
794 794 def debugfileset(ui, repo, expr, **opts):
795 795 '''parse and apply a fileset specification'''
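# Usage sketch (pattern illustrative): print the parse tree with -v and
# the files matching the fileset, evaluated against a revision with -r:
#
#   $ hg debugfileset -v -r tip 'added() or (modified() and size(">1k"))'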
796 796 ctx = scmutil.revsingle(repo, opts.get(r'rev'), None)
797 797 if ui.verbose:
798 798 tree = fileset.parse(expr)
799 799 ui.note(fileset.prettyformat(tree), "\n")
800 800
801 801 for f in ctx.getfileset(expr):
802 802 ui.write("%s\n" % f)
803 803
804 804 @command('debugfsinfo', [], _('[PATH]'), norepo=True)
805 805 def debugfsinfo(ui, path="."):
806 806 """show information detected about current filesystem"""
807 807 ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
808 808 ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
809 809 ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
810 810 ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
811 811 casesensitive = '(unknown)'
812 812 try:
813 813 with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f:
814 814 casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
815 815 except OSError:
816 816 pass
817 817 ui.write(('case-sensitive: %s\n') % casesensitive)
818 818
819 819 @command('debuggetbundle',
820 820 [('H', 'head', [], _('id of head node'), _('ID')),
821 821 ('C', 'common', [], _('id of common node'), _('ID')),
822 822 ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
823 823 _('REPO FILE [-H|-C ID]...'),
824 824 norepo=True)
825 825 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
826 826 """retrieves a bundle from a repo
827 827
828 828 Every ID must be a full-length hex node id string. Saves the bundle to the
829 829 given file.
830 830 """
831 831 opts = pycompat.byteskwargs(opts)
832 832 repo = hg.peer(ui, opts, repopath)
833 833 if not repo.capable('getbundle'):
834 834 raise error.Abort("getbundle() not supported by target repository")
835 835 args = {}
836 836 if common:
837 837 args[r'common'] = [bin(s) for s in common]
838 838 if head:
839 839 args[r'heads'] = [bin(s) for s in head]
840 840 # TODO: get desired bundlecaps from command line.
841 841 args[r'bundlecaps'] = None
842 842 bundle = repo.getbundle('debug', **args)
843 843
844 844 bundletype = opts.get('type', 'bzip2').lower()
845 845 btypes = {'none': 'HG10UN',
846 846 'bzip2': 'HG10BZ',
847 847 'gzip': 'HG10GZ',
848 848 'bundle2': 'HG20'}
849 849 bundletype = btypes.get(bundletype)
850 850 if bundletype not in bundle2.bundletypes:
851 851 raise error.Abort(_('unknown bundle type specified with --type'))
852 852 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
853 853
854 854 @command('debugignore', [], '[FILE]')
855 855 def debugignore(ui, repo, *files, **opts):
856 856 """display the combined ignore pattern and information about ignored files
857 857
858 858 With no arguments, display the combined ignore pattern.
859 859
860 860 Given space-separated file names, show whether each file is ignored
861 861 and, if so, the ignore rule (file and line number) that matched it.
862 862 """
863 863 ignore = repo.dirstate._ignore
864 864 if not files:
865 865 # Show all the patterns
866 866 ui.write("%s\n" % repr(ignore))
867 867 else:
868 868 for f in files:
869 869 nf = util.normpath(f)
870 870 ignored = None
871 871 ignoredata = None
872 872 if nf != '.':
873 873 if ignore(nf):
874 874 ignored = nf
875 875 ignoredata = repo.dirstate._ignorefileandline(nf)
876 876 else:
877 877 for p in util.finddirs(nf):
878 878 if ignore(p):
879 879 ignored = p
880 880 ignoredata = repo.dirstate._ignorefileandline(p)
881 881 break
882 882 if ignored:
883 883 if ignored == nf:
884 884 ui.write(_("%s is ignored\n") % f)
885 885 else:
886 886 ui.write(_("%s is ignored because of "
887 887 "containing folder %s\n")
888 888 % (f, ignored))
889 889 ignorefile, lineno, line = ignoredata
890 890 ui.write(_("(ignore rule in %s, line %d: '%s')\n")
891 891 % (ignorefile, lineno, line))
892 892 else:
893 893 ui.write(_("%s is not ignored\n") % f)
894 894
895 895 @command('debugindex', cmdutil.debugrevlogopts +
896 896 [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
897 897 _('[-f FORMAT] -c|-m|FILE'),
898 898 optionalrepo=True)
899 899 def debugindex(ui, repo, file_=None, **opts):
900 900 """dump the contents of an index file"""
901 901 opts = pycompat.byteskwargs(opts)
902 902 r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
903 903 format = opts.get('format', 0)
904 904 if format not in (0, 1):
905 905 raise error.Abort(_("unknown format %d") % format)
906 906
907 907 generaldelta = r.version & revlog.FLAG_GENERALDELTA
908 908 if generaldelta:
909 909 basehdr = ' delta'
910 910 else:
911 911 basehdr = ' base'
912 912
913 913 if ui.debugflag:
914 914 shortfn = hex
915 915 else:
916 916 shortfn = short
917 917
918 918 # There might not be anything in r, so have a sane default
919 919 idlen = 12
920 920 for i in r:
921 921 idlen = len(shortfn(r.node(i)))
922 922 break
923 923
924 924 if format == 0:
925 925 ui.write((" rev offset length " + basehdr + " linkrev"
926 926 " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
927 927 elif format == 1:
928 928 ui.write((" rev flag offset length"
929 929 " size " + basehdr + " link p1 p2"
930 930 " %s\n") % "nodeid".rjust(idlen))
931 931
932 932 for i in r:
933 933 node = r.node(i)
934 934 if generaldelta:
935 935 base = r.deltaparent(i)
936 936 else:
937 937 base = r.chainbase(i)
938 938 if format == 0:
939 939 try:
940 940 pp = r.parents(node)
941 941 except Exception:
942 942 pp = [nullid, nullid]
943 943 ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
944 944 i, r.start(i), r.length(i), base, r.linkrev(i),
945 945 shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
946 946 elif format == 1:
947 947 pr = r.parentrevs(i)
948 948 ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
949 949 i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
950 950 base, r.linkrev(i), pr[0], pr[1], shortfn(node)))
951 951
952 952 @command('debugindexdot', cmdutil.debugrevlogopts,
953 953 _('-c|-m|FILE'), optionalrepo=True)
954 954 def debugindexdot(ui, repo, file_=None, **opts):
955 955 """dump an index DAG as a graphviz dot file"""
956 956 opts = pycompat.byteskwargs(opts)
957 957 r = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
958 958 ui.write(("digraph G {\n"))
959 959 for i in r:
960 960 node = r.node(i)
961 961 pp = r.parents(node)
962 962 ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
963 963 if pp[1] != nullid:
964 964 ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
965 965 ui.write("}\n")
966 966
967 967 @command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
968 968 def debuginstall(ui, **opts):
969 969 '''test Mercurial installation
970 970
971 971 Returns 0 on success.
972 972 '''
973 973 opts = pycompat.byteskwargs(opts)
974 974
975 975 def writetemp(contents):
976 976 (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
977 977 f = os.fdopen(fd, pycompat.sysstr("wb"))
978 978 f.write(contents)
979 979 f.close()
980 980 return name
981 981
982 982 problems = 0
983 983
984 984 fm = ui.formatter('debuginstall', opts)
985 985 fm.startitem()
986 986
987 987 # encoding
988 988 fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
989 989 err = None
990 990 try:
991 991 encoding.fromlocal("test")
992 992 except error.Abort as inst:
993 993 err = inst
994 994 problems += 1
995 995 fm.condwrite(err, 'encodingerror', _(" %s\n"
996 996 " (check that your locale is properly set)\n"), err)
997 997
998 998 # Python
999 999 fm.write('pythonexe', _("checking Python executable (%s)\n"),
1000 1000 pycompat.sysexecutable)
1001 1001 fm.write('pythonver', _("checking Python version (%s)\n"),
1002 1002 ("%d.%d.%d" % sys.version_info[:3]))
1003 1003 fm.write('pythonlib', _("checking Python lib (%s)...\n"),
1004 1004 os.path.dirname(pycompat.fsencode(os.__file__)))
1005 1005
1006 1006 security = set(sslutil.supportedprotocols)
1007 1007 if sslutil.hassni:
1008 1008 security.add('sni')
1009 1009
1010 1010 fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
1011 1011 fm.formatlist(sorted(security), name='protocol',
1012 1012 fmt='%s', sep=','))
1013 1013
1014 1014 # These are warnings, not errors. So don't increment problem count. This
1015 1015 # may change in the future.
1016 1016 if 'tls1.2' not in security:
1017 1017 fm.plain(_(' TLS 1.2 not supported by Python install; '
1018 1018 'network connections lack modern security\n'))
1019 1019 if 'sni' not in security:
1020 1020 fm.plain(_(' SNI not supported by Python install; may have '
1021 1021 'connectivity issues with some servers\n'))
1022 1022
1023 1023 # TODO print CA cert info
1024 1024
1025 1025 # hg version
1026 1026 hgver = util.version()
1027 1027 fm.write('hgver', _("checking Mercurial version (%s)\n"),
1028 1028 hgver.split('+')[0])
1029 1029 fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
1030 1030 '+'.join(hgver.split('+')[1:]))
1031 1031
1032 1032 # compiled modules
1033 1033 fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
1034 1034 policy.policy)
1035 1035 fm.write('hgmodules', _("checking installed modules (%s)...\n"),
1036 1036 os.path.dirname(pycompat.fsencode(__file__)))
1037 1037
1038 1038 if policy.policy in ('c', 'allow'):
1039 1039 err = None
1040 1040 try:
1041 1041 from .cext import (
1042 1042 base85,
1043 1043 bdiff,
1044 1044 mpatch,
1045 1045 osutil,
1046 1046 )
1047 1047 dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
1048 1048 except Exception as inst:
1049 1049 err = inst
1050 1050 problems += 1
1051 1051 fm.condwrite(err, 'extensionserror', " %s\n", err)
1052 1052
1053 1053 compengines = util.compengines._engines.values()
1054 1054 fm.write('compengines', _('checking registered compression engines (%s)\n'),
1055 1055 fm.formatlist(sorted(e.name() for e in compengines),
1056 1056 name='compengine', fmt='%s', sep=', '))
1057 1057 fm.write('compenginesavail', _('checking available compression engines '
1058 1058 '(%s)\n'),
1059 1059 fm.formatlist(sorted(e.name() for e in compengines
1060 1060 if e.available()),
1061 1061 name='compengine', fmt='%s', sep=', '))
1062 1062 wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
1063 1063 fm.write('compenginesserver', _('checking available compression engines '
1064 1064 'for wire protocol (%s)\n'),
1065 1065 fm.formatlist([e.name() for e in wirecompengines
1066 1066 if e.wireprotosupport()],
1067 1067 name='compengine', fmt='%s', sep=', '))
1068 1068
1069 1069 # templates
1070 1070 p = templater.templatepaths()
1071 1071 fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
1072 1072 fm.condwrite(not p, '', _(" no template directories found\n"))
1073 1073 if p:
1074 1074 m = templater.templatepath("map-cmdline.default")
1075 1075 if m:
1076 1076 # template found, check if it is working
1077 1077 err = None
1078 1078 try:
1079 1079 templater.templater.frommapfile(m)
1080 1080 except Exception as inst:
1081 1081 err = inst
1082 1082 p = None
1083 1083 fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
1084 1084 else:
1085 1085 p = None
1086 1086 fm.condwrite(p, 'defaulttemplate',
1087 1087 _("checking default template (%s)\n"), m)
1088 1088 fm.condwrite(not m, 'defaulttemplatenotfound',
1089 1089 _(" template '%s' not found\n"), "default")
1090 1090 if not p:
1091 1091 problems += 1
1092 1092 fm.condwrite(not p, '',
1093 1093 _(" (templates seem to have been installed incorrectly)\n"))
1094 1094
1095 1095 # editor
1096 1096 editor = ui.geteditor()
1097 1097 editor = util.expandpath(editor)
1098 1098 fm.write('editor', _("checking commit editor... (%s)\n"), editor)
1099 1099 cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
1100 1100 fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
1101 1101 _(" No commit editor set and can't find %s in PATH\n"
1102 1102 " (specify a commit editor in your configuration"
1103 1103 " file)\n"), not cmdpath and editor == 'vi' and editor)
1104 1104 fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
1105 1105 _(" Can't find editor '%s' in PATH\n"
1106 1106 " (specify a commit editor in your configuration"
1107 1107 " file)\n"), not cmdpath and editor)
1108 1108 if not cmdpath and editor != 'vi':
1109 1109 problems += 1
1110 1110
1111 1111 # check username
1112 1112 username = None
1113 1113 err = None
1114 1114 try:
1115 1115 username = ui.username()
1116 1116 except error.Abort as e:
1117 1117 err = e
1118 1118 problems += 1
1119 1119
1120 1120 fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
1121 1121 fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
1122 1122 " (specify a username in your configuration file)\n"), err)
1123 1123
1124 1124 fm.condwrite(not problems, '',
1125 1125 _("no problems detected\n"))
1126 1126 if not problems:
1127 1127 fm.data(problems=problems)
1128 1128 fm.condwrite(problems, 'problems',
1129 1129 _("%d problems detected,"
1130 1130 " please check your install!\n"), problems)
1131 1131 fm.end()
1132 1132
1133 1133 return problems
1134 1134
1135 1135 @command('debugknown', [], _('REPO ID...'), norepo=True)
1136 1136 def debugknown(ui, repopath, *ids, **opts):
1137 1137 """test whether node ids are known to a repo
1138 1138
1139 1139 Every ID must be a full-length hex node id string. Returns a list of 0s
1140 1140 and 1s indicating unknown/known.
1141 1141 """
1142 1142 opts = pycompat.byteskwargs(opts)
1143 1143 repo = hg.peer(ui, opts, repopath)
1144 1144 if not repo.capable('known'):
1145 1145 raise error.Abort("known() not supported by target repository")
1146 1146 flags = repo.known([bin(s) for s in ids])
1147 1147 ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
1148 1148
1149 1149 @command('debuglabelcomplete', [], _('LABEL...'))
1150 1150 def debuglabelcomplete(ui, repo, *args):
1151 1151 '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
1152 1152 debugnamecomplete(ui, repo, *args)
1153 1153
1154 1154 @command('debuglocks',
1155 1155 [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
1156 1156 ('W', 'force-wlock', None,
1157 1157 _('free the working state lock (DANGEROUS)'))],
1158 1158 _('[OPTION]...'))
1159 1159 def debuglocks(ui, repo, **opts):
1160 1160 """show or modify state of locks
1161 1161
1162 1162 By default, this command will show which locks are held. This
1163 1163 includes the user and process holding the lock, the amount of time
1164 1164 the lock has been held, and the machine name where the process is
1165 1165 running if it's not local.
1166 1166
1167 1167 Locks protect the integrity of Mercurial's data, so they should be
1168 1168 treated with care. System crashes or other interruptions may cause
1169 1169 locks to not be properly released, though Mercurial will usually
1170 1170 detect and remove such stale locks automatically.
1171 1171
1172 1172 However, detecting stale locks may not always be possible (for
1173 1173 instance, on a shared filesystem). Removing locks may also be
1174 1174 blocked by filesystem permissions.
1175 1175
1176 1176 Returns 0 if no locks are held.
1177 1177
1178 1178 """
1179 1179
1180 1180 if opts.get(r'force_lock'):
1181 1181 repo.svfs.unlink('lock')
1182 1182 if opts.get(r'force_wlock'):
1183 1183 repo.vfs.unlink('wlock')
1184 1184 if opts.get(r'force_lock') or opts.get(r'force_wlock'):
1185 1185 return 0
1186 1186
1187 1187 now = time.time()
1188 1188 held = 0
1189 1189
1190 1190 def report(vfs, name, method):
1191 1191 # this causes stale locks to get reaped for more accurate reporting
1192 1192 try:
1193 1193 l = method(False)
1194 1194 except error.LockHeld:
1195 1195 l = None
1196 1196
1197 1197 if l:
1198 1198 l.release()
1199 1199 else:
1200 1200 try:
1201 1201 stat = vfs.lstat(name)
1202 1202 age = now - stat.st_mtime
1203 1203 user = util.username(stat.st_uid)
1204 1204 locker = vfs.readlock(name)
1205 1205 if ":" in locker:
1206 1206 host, pid = locker.split(':')
1207 1207 if host == socket.gethostname():
1208 1208 locker = 'user %s, process %s' % (user, pid)
1209 1209 else:
1210 1210 locker = 'user %s, process %s, host %s' \
1211 1211 % (user, pid, host)
1212 1212 ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
1213 1213 return 1
1214 1214 except OSError as e:
1215 1215 if e.errno != errno.ENOENT:
1216 1216 raise
1217 1217
1218 1218 ui.write(("%-6s free\n") % (name + ":"))
1219 1219 return 0
1220 1220
1221 1221 held += report(repo.svfs, "lock", repo.lock)
1222 1222 held += report(repo.vfs, "wlock", repo.wlock)
1223 1223
1224 1224 return held
1225 1225
1226 1226 @command('debugmergestate', [], '')
1227 1227 def debugmergestate(ui, repo, *args):
1228 1228 """print merge state
1229 1229
1230 1230 Use --verbose to print out information about whether v1 or v2 merge state
1231 1231 was chosen."""
1232 1232 def _hashornull(h):
1233 1233 if h == nullhex:
1234 1234 return 'null'
1235 1235 else:
1236 1236 return h
1237 1237
1238 1238 def printrecords(version):
1239 1239 ui.write(('* version %s records\n') % version)
1240 1240 if version == 1:
1241 1241 records = v1records
1242 1242 else:
1243 1243 records = v2records
1244 1244
1245 1245 for rtype, record in records:
1246 1246 # pretty print some record types
1247 1247 if rtype == 'L':
1248 1248 ui.write(('local: %s\n') % record)
1249 1249 elif rtype == 'O':
1250 1250 ui.write(('other: %s\n') % record)
1251 1251 elif rtype == 'm':
1252 1252 driver, mdstate = record.split('\0', 1)
1253 1253 ui.write(('merge driver: %s (state "%s")\n')
1254 1254 % (driver, mdstate))
1255 1255 elif rtype in 'FDC':
1256 1256 r = record.split('\0')
1257 1257 f, state, hash, lfile, afile, anode, ofile = r[0:7]
1258 1258 if version == 1:
1259 1259 onode = 'not stored in v1 format'
1260 1260 flags = r[7]
1261 1261 else:
1262 1262 onode, flags = r[7:9]
1263 1263 ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
1264 1264 % (f, rtype, state, _hashornull(hash)))
1265 1265 ui.write((' local path: %s (flags "%s")\n') % (lfile, flags))
1266 1266 ui.write((' ancestor path: %s (node %s)\n')
1267 1267 % (afile, _hashornull(anode)))
1268 1268 ui.write((' other path: %s (node %s)\n')
1269 1269 % (ofile, _hashornull(onode)))
1270 1270 elif rtype == 'f':
1271 1271 filename, rawextras = record.split('\0', 1)
1272 1272 extras = rawextras.split('\0')
1273 1273 i = 0
1274 1274 extrastrings = []
1275 1275 while i < len(extras):
1276 1276 extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
1277 1277 i += 2
1278 1278
1279 1279 ui.write(('file extras: %s (%s)\n')
1280 1280 % (filename, ', '.join(extrastrings)))
1281 1281 elif rtype == 'l':
1282 1282 labels = record.split('\0', 2)
1283 1283 labels = [l for l in labels if len(l) > 0]
1284 1284 ui.write(('labels:\n'))
1285 1285 ui.write((' local: %s\n' % labels[0]))
1286 1286 ui.write((' other: %s\n' % labels[1]))
1287 1287 if len(labels) > 2:
1288 1288 ui.write((' base: %s\n' % labels[2]))
1289 1289 else:
1290 1290 ui.write(('unrecognized entry: %s\t%s\n')
1291 1291 % (rtype, record.replace('\0', '\t')))
1292 1292
1293 1293 # Avoid mergestate.read() since it may raise an exception for unsupported
1294 1294 # merge state records. We shouldn't be doing this, but this is OK since this
1295 1295 # command is pretty low-level.
1296 1296 ms = mergemod.mergestate(repo)
1297 1297
1298 1298 # sort so that reasonable information is on top
1299 1299 v1records = ms._readrecordsv1()
1300 1300 v2records = ms._readrecordsv2()
1301 1301 order = 'LOml'
1302 1302 def key(r):
1303 1303 idx = order.find(r[0])
1304 1304 if idx == -1:
1305 1305 return (1, r[1])
1306 1306 else:
1307 1307 return (0, idx)
1308 1308 v1records.sort(key=key)
1309 1309 v2records.sort(key=key)
1310 1310
1311 1311 if not v1records and not v2records:
1312 1312 ui.write(('no merge state found\n'))
1313 1313 elif not v2records:
1314 1314 ui.note(('no version 2 merge state\n'))
1315 1315 printrecords(1)
1316 1316 elif ms._v1v2match(v1records, v2records):
1317 1317 ui.note(('v1 and v2 states match: using v2\n'))
1318 1318 printrecords(2)
1319 1319 else:
1320 1320 ui.note(('v1 and v2 states mismatch: using v1\n'))
1321 1321 printrecords(1)
1322 1322 if ui.verbose:
1323 1323 printrecords(2)
1324 1324
1325 1325 @command('debugnamecomplete', [], _('NAME...'))
1326 1326 def debugnamecomplete(ui, repo, *args):
1327 1327 '''complete "names" - tags, open branch names, bookmark names'''
1328 1328
1329 1329 names = set()
1330 1330 # since we previously only listed open branches, we will handle that
1331 1331 # specially (after this for loop)
1332 1332 for name, ns in repo.names.iteritems():
1333 1333 if name != 'branches':
1334 1334 names.update(ns.listnames(repo))
1335 1335 names.update(tag for (tag, heads, tip, closed)
1336 1336 in repo.branchmap().iterbranches() if not closed)
1337 1337 completions = set()
1338 1338 if not args:
1339 1339 args = ['']
1340 1340 for a in args:
1341 1341 completions.update(n for n in names if n.startswith(a))
1342 1342 ui.write('\n'.join(sorted(completions)))
1343 1343 ui.write('\n')
1344 1344
1345 1345 @command('debugobsolete',
1346 1346 [('', 'flags', 0, _('markers flag')),
1347 1347 ('', 'record-parents', False,
1348 1348 _('record parent information for the precursor')),
1349 1349 ('r', 'rev', [], _('display markers relevant to REV')),
1350 1350 ('', 'exclusive', False, _('restrict display to markers only '
1351 1351 'relevant to REV')),
1352 1352 ('', 'index', False, _('display index of the marker')),
1353 1353 ('', 'delete', [], _('delete markers specified by indices')),
1354 1354 ] + cmdutil.commitopts2 + cmdutil.formatteropts,
1355 1355 _('[OBSOLETED [REPLACEMENT ...]]'))
1356 1356 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
1357 1357 """create arbitrary obsolete marker
1358 1358
1359 1359 With no arguments, displays the list of obsolescence markers."""
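# Hedged examples (revisions illustrative): the command requires full
# 40-digit hex ids (see parsenodeid below), which `hg id --debug -i`
# can supply:
#
#   $ hg debugobsolete `hg id --debug -i -r 1` `hg id --debug -i -r 2`
#   $ hg debugobsolete --index     # list all markers with their indices
#   $ hg debugobsolete --delete 0  # then delete marker 0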
1360 1360
1361 1361 opts = pycompat.byteskwargs(opts)
1362 1362
1363 1363 def parsenodeid(s):
1364 1364 try:
1365 1365 # We do not use revsingle/revrange functions here to accept
1366 1366 # arbitrary node identifiers, possibly not present in the
1367 1367 # local repository.
1368 1368 n = bin(s)
1369 1369 if len(n) != len(nullid):
1370 1370 raise TypeError()
1371 1371 return n
1372 1372 except TypeError:
1373 1373 raise error.Abort('changeset references must be full hexadecimal '
1374 1374 'node identifiers')
1375 1375
1376 1376 if opts.get('delete'):
1377 1377 indices = []
1378 1378 for v in opts.get('delete'):
1379 1379 try:
1380 1380 indices.append(int(v))
1381 1381 except ValueError:
1382 1382 raise error.Abort(_('invalid index value: %r') % v,
1383 1383 hint=_('use integers for indices'))
1384 1384
1385 1385 if repo.currenttransaction():
1386 1386 raise error.Abort(_('cannot delete obsmarkers in the middle '
1387 1387 'of a transaction.'))
1388 1388
1389 1389 with repo.lock():
1390 1390 n = repair.deleteobsmarkers(repo.obsstore, indices)
1391 1391 ui.write(_('deleted %i obsolescence markers\n') % n)
1392 1392
1393 1393 return
1394 1394
1395 1395 if precursor is not None:
1396 1396 if opts['rev']:
1397 1397 raise error.Abort('cannot select revision when creating marker')
1398 1398 metadata = {}
1399 1399 metadata['user'] = opts['user'] or ui.username()
1400 1400 succs = tuple(parsenodeid(succ) for succ in successors)
1401 1401 l = repo.lock()
1402 1402 try:
1403 1403 tr = repo.transaction('debugobsolete')
1404 1404 try:
1405 1405 date = opts.get('date')
1406 1406 if date:
1407 1407 date = util.parsedate(date)
1408 1408 else:
1409 1409 date = None
1410 1410 prec = parsenodeid(precursor)
1411 1411 parents = None
1412 1412 if opts['record_parents']:
1413 1413 if prec not in repo.unfiltered():
1414 1414 raise error.Abort('cannot use --record-parents on '
1415 1415 'unknown changesets')
1416 1416 parents = repo.unfiltered()[prec].parents()
1417 1417 parents = tuple(p.node() for p in parents)
1418 1418 repo.obsstore.create(tr, prec, succs, opts['flags'],
1419 1419 parents=parents, date=date,
1420 1420 metadata=metadata, ui=ui)
1421 1421 tr.close()
1422 1422 except ValueError as exc:
1423 1423 raise error.Abort(_('bad obsmarker input: %s') % exc)
1424 1424 finally:
1425 1425 tr.release()
1426 1426 finally:
1427 1427 l.release()
1428 1428 else:
1429 1429 if opts['rev']:
1430 1430 revs = scmutil.revrange(repo, opts['rev'])
1431 1431 nodes = [repo[r].node() for r in revs]
1432 1432 markers = list(obsolete.getmarkers(repo, nodes=nodes,
1433 1433 exclusive=opts['exclusive']))
1434 1434 markers.sort(key=lambda x: x._data)
1435 1435 else:
1436 1436 markers = obsolete.getmarkers(repo)
1437 1437
1438 1438 markerstoiter = markers
1439 1439 isrelevant = lambda m: True
1440 1440 if opts.get('rev') and opts.get('index'):
1441 1441 markerstoiter = obsolete.getmarkers(repo)
1442 1442 markerset = set(markers)
1443 1443 isrelevant = lambda m: m in markerset
1444 1444
1445 1445 fm = ui.formatter('debugobsolete', opts)
1446 1446 for i, m in enumerate(markerstoiter):
1447 1447 if not isrelevant(m):
1448 1448 # marker can be irrelevant when we're iterating over a set
1449 1449 # of markers (markerstoiter) which is bigger than the set
1450 1450 # of markers we want to display (markers)
1451 1451 # this can happen if both --index and --rev options are
1452 1452 # provided and thus we need to iterate over all of the markers
1453 1453 # to get the correct indices, but only display the ones that
1454 1454 # are relevant to --rev value
1455 1455 continue
1456 1456 fm.startitem()
1457 1457 ind = i if opts.get('index') else None
1458 1458 cmdutil.showmarker(fm, m, index=ind)
1459 1459 fm.end()
1460 1460
1461 1461 @command('debugpathcomplete',
1462 1462 [('f', 'full', None, _('complete an entire path')),
1463 1463 ('n', 'normal', None, _('show only normal files')),
1464 1464 ('a', 'added', None, _('show only added files')),
1465 1465 ('r', 'removed', None, _('show only removed files'))],
1466 1466 _('FILESPEC...'))
1467 1467 def debugpathcomplete(ui, repo, *specs, **opts):
1468 1468 '''complete part or all of a tracked path
1469 1469
1470 1470 This command supports shells that offer path name completion. It
1471 1471 currently completes only files already known to the dirstate.
1472 1472
1473 1473 Completion extends only to the next path segment unless
1474 1474 --full is specified, in which case entire paths are used.'''
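# Usage sketch (paths hypothetical): complete the next path segment only,
# or whole tracked paths with -f:
#
#   $ hg debugpathcomplete sr           # might print "src"
#   $ hg debugpathcomplete -f -n src/   # full paths of normal files under src/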
1475 1475
1476 1476 def complete(path, acceptable):
1477 1477 dirstate = repo.dirstate
1478 1478 spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
1479 1479 rootdir = repo.root + pycompat.ossep
1480 1480 if spec != repo.root and not spec.startswith(rootdir):
1481 1481 return [], []
1482 1482 if os.path.isdir(spec):
1483 1483 spec += '/'
1484 1484 spec = spec[len(rootdir):]
1485 1485 fixpaths = pycompat.ossep != '/'
1486 1486 if fixpaths:
1487 1487 spec = spec.replace(pycompat.ossep, '/')
1488 1488 speclen = len(spec)
1489 1489 fullpaths = opts[r'full']
1490 1490 files, dirs = set(), set()
1491 1491 adddir, addfile = dirs.add, files.add
1492 1492 for f, st in dirstate.iteritems():
1493 1493 if f.startswith(spec) and st[0] in acceptable:
1494 1494 if fixpaths:
1495 1495 f = f.replace('/', pycompat.ossep)
1496 1496 if fullpaths:
1497 1497 addfile(f)
1498 1498 continue
1499 1499 s = f.find(pycompat.ossep, speclen)
1500 1500 if s >= 0:
1501 1501 adddir(f[:s])
1502 1502 else:
1503 1503 addfile(f)
1504 1504 return files, dirs
1505 1505
1506 1506 acceptable = ''
1507 1507 if opts[r'normal']:
1508 1508 acceptable += 'nm'
1509 1509 if opts[r'added']:
1510 1510 acceptable += 'a'
1511 1511 if opts[r'removed']:
1512 1512 acceptable += 'r'
1513 1513 cwd = repo.getcwd()
1514 1514 if not specs:
1515 1515 specs = ['.']
1516 1516
1517 1517 files, dirs = set(), set()
1518 1518 for spec in specs:
1519 1519 f, d = complete(spec, acceptable or 'nmar')
1520 1520 files.update(f)
1521 1521 dirs.update(d)
1522 1522 files.update(dirs)
1523 1523 ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
1524 1524 ui.write('\n')
1525 1525
1526 1526 @command('debugpickmergetool',
1527 1527 [('r', 'rev', '', _('check for files in this revision'), _('REV')),
1528 1528 ('', 'changedelete', None, _('emulate merging change and delete')),
1529 1529 ] + cmdutil.walkopts + cmdutil.mergetoolopts,
1530 1530 _('[PATTERN]...'),
1531 1531 inferrepo=True)
1532 1532 def debugpickmergetool(ui, repo, *pats, **opts):
1533 1533 """examine which merge tool is chosen for specified file
1534 1534
1535 1535 As described in :hg:`help merge-tools`, Mercurial examines
1536 1536 configurations below in this order to decide which merge tool is
1537 1537 chosen for specified file.
1538 1538
1539 1539 1. ``--tool`` option
1540 1540 2. ``HGMERGE`` environment variable
1541 1541 3. configurations in ``merge-patterns`` section
1542 1542 4. configuration of ``ui.merge``
1543 1543 5. configurations in ``merge-tools`` section
1544 1544 6. ``hgmerge`` tool (for historical reasons only)
1545 1545 7. default tool for fallback (``:merge`` or ``:prompt``)
1546 1546
1547 1547 This command writes out the examination result in the style below::
1548 1548
1549 1549 FILE = MERGETOOL
1550 1550
1551 1551 By default, all files known in the first parent context of the
1552 1552 working directory are examined. Use file patterns and/or -I/-X
1553 1553 options to limit target files. -r/--rev is also useful to examine
1554 1554 files in another context without actually updating to it.
1555 1555
1556 1556 With --debug, this command also shows warning messages while matching
1557 1557 against ``merge-patterns`` and so on. It is recommended to
1558 1558 use this option with explicit file patterns and/or -I/-X options,
1559 1559 because this option increases the amount of output per file according
1560 1560 to the configurations in hgrc.
1561 1561
1562 1562 With -v/--verbose, this command first shows the configurations below
1563 1563 (only those that are specified).
1564 1564
1565 1565 - ``--tool`` option
1566 1566 - ``HGMERGE`` environment variable
1567 1567 - configuration of ``ui.merge``
1568 1568
1569 1569 If a merge tool is chosen before matching against
1570 1570 ``merge-patterns``, this command can't show any helpful
1571 1571 information, even with --debug. In such cases, the information above
1572 1572 is useful for understanding why a merge tool was chosen.
1573 1573 """
1574 1574 opts = pycompat.byteskwargs(opts)
1575 1575 overrides = {}
1576 1576 if opts['tool']:
1577 1577 overrides[('ui', 'forcemerge')] = opts['tool']
1578 1578 ui.note(('with --tool %r\n') % (opts['tool']))
1579 1579
1580 1580 with ui.configoverride(overrides, 'debugmergepatterns'):
1581 1581 hgmerge = encoding.environ.get("HGMERGE")
1582 1582 if hgmerge is not None:
1583 1583 ui.note(('with HGMERGE=%r\n') % (hgmerge))
1584 1584 uimerge = ui.config("ui", "merge")
1585 1585 if uimerge:
1586 1586 ui.note(('with ui.merge=%r\n') % (uimerge))
1587 1587
1588 1588 ctx = scmutil.revsingle(repo, opts.get('rev'))
1589 1589 m = scmutil.match(ctx, pats, opts)
1590 1590 changedelete = opts['changedelete']
1591 1591 for path in ctx.walk(m):
1592 1592 fctx = ctx[path]
1593 1593 try:
1594 1594 if not ui.debugflag:
1595 1595 ui.pushbuffer(error=True)
1596 1596 tool, toolpath = filemerge._picktool(repo, ui, path,
1597 1597 fctx.isbinary(),
1598 1598 'l' in fctx.flags(),
1599 1599 changedelete)
1600 1600 finally:
1601 1601 if not ui.debugflag:
1602 1602 ui.popbuffer()
1603 1603 ui.write(('%s = %s\n') % (path, tool))
1604 1604
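# A toy sketch (an illustration only; the real resolution lives in
# filemerge._picktool) of the precedence list documented above: the first
# configured source wins.
def _firstconfigured(*candidates):
    for candidate in candidates:
        if candidate:
            return candidate
    return None

# e.g. _firstconfigured(toolopt, hgmergeenv, uimerge) would pick --tool if
# set, then $HGMERGE, then ui.merge (the three names are hypothetical).
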
1605 1605 @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
1606 1606 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
1607 1607 '''access the pushkey key/value protocol
1608 1608
1609 1609 With two args, list the keys in the given namespace.
1610 1610
1611 1611 With five args, set a key to new if it currently is set to old.
1612 1612 Reports success or failure.
1613 1613 '''
1614 1614
1615 1615 target = hg.peer(ui, {}, repopath)
1616 1616 if keyinfo:
1617 1617 key, old, new = keyinfo
1618 1618 r = target.pushkey(namespace, key, old, new)
1619 1619 ui.status(str(r) + '\n')
1620 1620 return not r
1621 1621 else:
1622 1622 for k, v in sorted(target.listkeys(namespace).iteritems()):
1623 1623 ui.write("%s\t%s\n" % (util.escapestr(k),
1624 1624 util.escapestr(v)))
1625 1625
1626 1626 @command('debugpvec', [], _('A B'))
1627 1627 def debugpvec(ui, repo, a, b=None):
1628 1628 ca = scmutil.revsingle(repo, a)
1629 1629 cb = scmutil.revsingle(repo, b)
1630 1630 pa = pvec.ctxpvec(ca)
1631 1631 pb = pvec.ctxpvec(cb)
1632 1632 if pa == pb:
1633 1633 rel = "="
1634 1634 elif pa > pb:
1635 1635 rel = ">"
1636 1636 elif pa < pb:
1637 1637 rel = "<"
1638 1638 elif pa | pb:
1639 1639 rel = "|"
1640 1640 ui.write(_("a: %s\n") % pa)
1641 1641 ui.write(_("b: %s\n") % pb)
1642 1642 ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
1643 1643 ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
1644 1644 (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
1645 1645 pa.distance(pb), rel))
1646 1646
1647 1647 @command('debugrebuilddirstate|debugrebuildstate',
1648 1648 [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
1649 1649 ('', 'minimal', None, _('only rebuild files that are inconsistent with '
1650 1650 'the working copy parent')),
1651 1651 ],
1652 1652 _('[-r REV]'))
1653 1653 def debugrebuilddirstate(ui, repo, rev, **opts):
1654 1654 """rebuild the dirstate as it would look like for the given revision
1655 1655
1656 1656 If no revision is specified, the working directory's first parent will be used.
1657 1657
1658 1658 The dirstate will be set to the files of the given revision.
1659 1659 The actual working directory content or existing dirstate
1660 1660 information such as adds or removes is not considered.
1661 1661
1662 1662 ``minimal`` will only rebuild the dirstate status for files that claim to be
1663 1663 tracked but are not in the parent manifest, or that exist in the parent
1664 1664 manifest but are not in the dirstate. It will not change adds, removes, or
1665 1665 modified files that are in the working copy parent.
1666 1666
1667 1667 One use of this command is to make the next :hg:`status` invocation
1668 1668 check the actual file content.
1669 1669 """
1670 1670 ctx = scmutil.revsingle(repo, rev)
1671 1671 with repo.wlock():
1672 1672 dirstate = repo.dirstate
1673 1673 changedfiles = None
1674 1674 # See command doc for what minimal does.
1675 1675 if opts.get(r'minimal'):
1676 1676 manifestfiles = set(ctx.manifest().keys())
1677 1677 dirstatefiles = set(dirstate)
1678 1678 manifestonly = manifestfiles - dirstatefiles
1679 1679 dsonly = dirstatefiles - manifestfiles
1680 1680 dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
1681 1681 changedfiles = manifestonly | dsnotadded
1682 1682
1683 1683 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
1684 1684
1685 1685 @command('debugrebuildfncache', [], '')
1686 1686 def debugrebuildfncache(ui, repo):
1687 1687 """rebuild the fncache file"""
1688 1688 repair.rebuildfncache(ui, repo)
1689 1689
1690 1690 @command('debugrename',
1691 1691 [('r', 'rev', '', _('revision to debug'), _('REV'))],
1692 1692 _('[-r REV] FILE'))
1693 1693 def debugrename(ui, repo, file1, *pats, **opts):
1694 1694 """dump rename information"""
1695 1695
1696 1696 opts = pycompat.byteskwargs(opts)
1697 1697 ctx = scmutil.revsingle(repo, opts.get('rev'))
1698 1698 m = scmutil.match(ctx, (file1,) + pats, opts)
1699 1699 for abs in ctx.walk(m):
1700 1700 fctx = ctx[abs]
1701 1701 o = fctx.filelog().renamed(fctx.filenode())
1702 1702 rel = m.rel(abs)
1703 1703 if o:
1704 1704 ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
1705 1705 else:
1706 1706 ui.write(_("%s not renamed\n") % rel)
1707 1707
1708 1708 @command('debugrevlog', cmdutil.debugrevlogopts +
1709 1709 [('d', 'dump', False, _('dump index data'))],
1710 1710 _('-c|-m|FILE'),
1711 1711 optionalrepo=True)
1712 1712 def debugrevlog(ui, repo, file_=None, **opts):
1713 1713 """show data and statistics about a revlog"""
1714 1714 opts = pycompat.byteskwargs(opts)
1715 1715 r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
1716 1716
1717 1717 if opts.get("dump"):
1718 1718 numrevs = len(r)
1719 1719 ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
1720 1720 " rawsize totalsize compression heads chainlen\n"))
1721 1721 ts = 0
1722 1722 heads = set()
1723 1723
1724 1724 for rev in xrange(numrevs):
1725 1725 dbase = r.deltaparent(rev)
1726 1726 if dbase == -1:
1727 1727 dbase = rev
1728 1728 cbase = r.chainbase(rev)
1729 1729 clen = r.chainlen(rev)
1730 1730 p1, p2 = r.parentrevs(rev)
1731 1731 rs = r.rawsize(rev)
1732 1732 ts = ts + rs
1733 1733 heads -= set(r.parentrevs(rev))
1734 1734 heads.add(rev)
1735 1735 try:
1736 1736 compression = ts / r.end(rev)
1737 1737 except ZeroDivisionError:
1738 1738 compression = 0
1739 1739 ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
1740 1740 "%11d %5d %8d\n" %
1741 1741 (rev, p1, p2, r.start(rev), r.end(rev),
1742 1742 r.start(dbase), r.start(cbase),
1743 1743 r.start(p1), r.start(p2),
1744 1744 rs, ts, compression, len(heads), clen))
1745 1745 return 0
1746 1746
1747 1747 v = r.version
1748 1748 format = v & 0xFFFF
1749 1749 flags = []
1750 1750 gdelta = False
1751 1751 if v & revlog.FLAG_INLINE_DATA:
1752 1752 flags.append('inline')
1753 1753 if v & revlog.FLAG_GENERALDELTA:
1754 1754 gdelta = True
1755 1755 flags.append('generaldelta')
1756 1756 if not flags:
1757 1757 flags = ['(none)']
1758 1758
1759 1759 nummerges = 0
1760 1760 numfull = 0
1761 1761 numprev = 0
1762 1762 nump1 = 0
1763 1763 nump2 = 0
1764 1764 numother = 0
1765 1765 nump1prev = 0
1766 1766 nump2prev = 0
1767 1767 chainlengths = []
1768 1768 chainbases = []
1769 1769 chainspans = []
1770 1770
1771 1771 datasize = [None, 0, 0]
1772 1772 fullsize = [None, 0, 0]
1773 1773 deltasize = [None, 0, 0]
1774 1774 chunktypecounts = {}
1775 1775 chunktypesizes = {}
1776 1776
1777 1777 def addsize(size, l):
1778 1778 if l[0] is None or size < l[0]:
1779 1779 l[0] = size
1780 1780 if size > l[1]:
1781 1781 l[1] = size
1782 1782 l[2] += size
1783 1783
1784 1784 numrevs = len(r)
1785 1785 for rev in xrange(numrevs):
1786 1786 p1, p2 = r.parentrevs(rev)
1787 1787 delta = r.deltaparent(rev)
1788 1788 if format > 0:
1789 1789 addsize(r.rawsize(rev), datasize)
1790 1790 if p2 != nullrev:
1791 1791 nummerges += 1
1792 1792 size = r.length(rev)
1793 1793 if delta == nullrev:
1794 1794 chainlengths.append(0)
1795 1795 chainbases.append(r.start(rev))
1796 1796 chainspans.append(size)
1797 1797 numfull += 1
1798 1798 addsize(size, fullsize)
1799 1799 else:
1800 1800 chainlengths.append(chainlengths[delta] + 1)
1801 1801 baseaddr = chainbases[delta]
1802 1802 revaddr = r.start(rev)
1803 1803 chainbases.append(baseaddr)
1804 1804 chainspans.append((revaddr - baseaddr) + size)
1805 1805 addsize(size, deltasize)
1806 1806 if delta == rev - 1:
1807 1807 numprev += 1
1808 1808 if delta == p1:
1809 1809 nump1prev += 1
1810 1810 elif delta == p2:
1811 1811 nump2prev += 1
1812 1812 elif delta == p1:
1813 1813 nump1 += 1
1814 1814 elif delta == p2:
1815 1815 nump2 += 1
1816 1816 elif delta != nullrev:
1817 1817 numother += 1
1818 1818
1819 1819 # Obtain data on the raw chunks in the revlog.
1820 1820 segment = r._getsegmentforrevs(rev, rev)[1]
1821 1821 if segment:
1822 1822 chunktype = bytes(segment[0:1])
1823 1823 else:
1824 1824 chunktype = 'empty'
1825 1825
1826 1826 if chunktype not in chunktypecounts:
1827 1827 chunktypecounts[chunktype] = 0
1828 1828 chunktypesizes[chunktype] = 0
1829 1829
1830 1830 chunktypecounts[chunktype] += 1
1831 1831 chunktypesizes[chunktype] += size
1832 1832
1833 1833 # Adjust size min value for empty cases
1834 1834 for size in (datasize, fullsize, deltasize):
1835 1835 if size[0] is None:
1836 1836 size[0] = 0
1837 1837
1838 1838 numdeltas = numrevs - numfull
1839 1839 numoprev = numprev - nump1prev - nump2prev
1840 1840 totalrawsize = datasize[2]
1841 1841 datasize[2] /= numrevs
1842 1842 fulltotal = fullsize[2]
1843 1843 fullsize[2] /= numfull
1844 1844 deltatotal = deltasize[2]
1845 1845 if numrevs - numfull > 0:
1846 1846 deltasize[2] /= numrevs - numfull
1847 1847 totalsize = fulltotal + deltatotal
1848 1848 avgchainlen = sum(chainlengths) / numrevs
1849 1849 maxchainlen = max(chainlengths)
1850 1850 maxchainspan = max(chainspans)
1851 1851 compratio = 1
1852 1852 if totalsize:
1853 1853 compratio = totalrawsize / totalsize
1854 1854
1855 1855 basedfmtstr = '%%%dd\n'
1856 1856 basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
1857 1857
1858 1858 def dfmtstr(max):
1859 1859 return basedfmtstr % len(str(max))
1860 1860 def pcfmtstr(max, padding=0):
1861 1861 return basepcfmtstr % (len(str(max)), ' ' * padding)
1862 1862
1863 1863 def pcfmt(value, total):
1864 1864 if total:
1865 1865 return (value, 100 * float(value) / total)
1866 1866 else:
1867 1867 return value, 100.0
1868 1868
1869 1869 ui.write(('format : %d\n') % format)
1870 1870 ui.write(('flags : %s\n') % ', '.join(flags))
1871 1871
1872 1872 ui.write('\n')
1873 1873 fmt = pcfmtstr(totalsize)
1874 1874 fmt2 = dfmtstr(totalsize)
1875 1875 ui.write(('revisions : ') + fmt2 % numrevs)
1876 1876 ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
1877 1877 ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
1878 1878 ui.write(('revisions : ') + fmt2 % numrevs)
1879 1879 ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
1880 1880 ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
1881 1881 ui.write(('revision size : ') + fmt2 % totalsize)
1882 1882 ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
1883 1883 ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))
1884 1884
1885 1885 def fmtchunktype(chunktype):
1886 1886 if chunktype == 'empty':
1887 1887 return ' %s : ' % chunktype
1888 1888 elif chunktype in pycompat.bytestr(string.ascii_letters):
1889 1889 return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
1890 1890 else:
1891 1891 return ' 0x%s : ' % hex(chunktype)
1892 1892
1893 1893 ui.write('\n')
1894 1894 ui.write(('chunks : ') + fmt2 % numrevs)
1895 1895 for chunktype in sorted(chunktypecounts):
1896 1896 ui.write(fmtchunktype(chunktype))
1897 1897 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
1898 1898 ui.write(('chunks size : ') + fmt2 % totalsize)
1899 1899 for chunktype in sorted(chunktypecounts):
1900 1900 ui.write(fmtchunktype(chunktype))
1901 1901 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
1902 1902
1903 1903 ui.write('\n')
1904 1904 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
1905 1905 ui.write(('avg chain length : ') + fmt % avgchainlen)
1906 1906 ui.write(('max chain length : ') + fmt % maxchainlen)
1907 1907 ui.write(('max chain reach : ') + fmt % maxchainspan)
1908 1908 ui.write(('compression ratio : ') + fmt % compratio)
1909 1909
1910 1910 if format > 0:
1911 1911 ui.write('\n')
1912 1912 ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
1913 1913 % tuple(datasize))
1914 1914 ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
1915 1915 % tuple(fullsize))
1916 1916 ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
1917 1917 % tuple(deltasize))
1918 1918
1919 1919 if numdeltas > 0:
1920 1920 ui.write('\n')
1921 1921 fmt = pcfmtstr(numdeltas)
1922 1922 fmt2 = pcfmtstr(numdeltas, 4)
1923 1923 ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
1924 1924 if numprev > 0:
1925 1925 ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
1926 1926 numprev))
1927 1927 ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
1928 1928 numprev))
1929 1929 ui.write((' other : ') + fmt2 % pcfmt(numoprev,
1930 1930 numprev))
1931 1931 if gdelta:
1932 1932 ui.write(('deltas against p1 : ')
1933 1933 + fmt % pcfmt(nump1, numdeltas))
1934 1934 ui.write(('deltas against p2 : ')
1935 1935 + fmt % pcfmt(nump2, numdeltas))
1936 1936 ui.write(('deltas against other : ') + fmt % pcfmt(numother,
1937 1937 numdeltas))
1938 1938
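# Worked example (made-up numbers) for the chain-span accounting above: a
# delta chain whose base starts at byte offset 100, with its newest link
# starting at byte 400 and occupying 50 bytes, spans (400 - 100) + 50
# bytes of the revlog data file.
_baseaddr, _revaddr, _size = 100, 400, 50
assert (_revaddr - _baseaddr) + _size == 350
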
1939 1939 @command('debugrevspec',
1940 1940 [('', 'optimize', None,
1941 1941 _('print parsed tree after optimizing (DEPRECATED)')),
1942 1942 ('', 'show-revs', True, _('print list of result revisions (default)')),
1943 1943 ('s', 'show-set', None, _('print internal representation of result set')),
1944 1944 ('p', 'show-stage', [],
1945 1945 _('print parsed tree at the given stage'), _('NAME')),
1946 1946 ('', 'no-optimized', False, _('evaluate tree without optimization')),
1947 1947 ('', 'verify-optimized', False, _('verify optimized result')),
1948 1948 ],
1949 1949 ('REVSPEC'))
1950 1950 def debugrevspec(ui, repo, expr, **opts):
1951 1951 """parse and apply a revision specification
1952 1952
1953 1953 Use -p/--show-stage option to print the parsed tree at the given stages.
1954 1954 Use -p all to print tree at every stage.
1955 1955
1956 1956 Use --no-show-revs option with -s or -p to print only the set
1957 1957 representation or the parsed tree respectively.
1958 1958
1959 1959 Use --verify-optimized to compare the optimized result with the unoptimized
1960 1960 one. Returns 1 if the optimized result differs.
1961 1961 """
1962 1962 opts = pycompat.byteskwargs(opts)
1963 1963 stages = [
1964 1964 ('parsed', lambda tree: tree),
1965 1965 ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)),
1966 1966 ('concatenated', revsetlang.foldconcat),
1967 1967 ('analyzed', revsetlang.analyze),
1968 1968 ('optimized', revsetlang.optimize),
1969 1969 ]
1970 1970 if opts['no_optimized']:
1971 1971 stages = stages[:-1]
1972 1972 if opts['verify_optimized'] and opts['no_optimized']:
1973 1973 raise error.Abort(_('cannot use --verify-optimized with '
1974 1974 '--no-optimized'))
1975 1975 stagenames = set(n for n, f in stages)
1976 1976
1977 1977 showalways = set()
1978 1978 showchanged = set()
1979 1979 if ui.verbose and not opts['show_stage']:
1980 1980 # show parsed tree by --verbose (deprecated)
1981 1981 showalways.add('parsed')
1982 1982 showchanged.update(['expanded', 'concatenated'])
1983 1983 if opts['optimize']:
1984 1984 showalways.add('optimized')
1985 1985 if opts['show_stage'] and opts['optimize']:
1986 1986 raise error.Abort(_('cannot use --optimize with --show-stage'))
1987 1987 if opts['show_stage'] == ['all']:
1988 1988 showalways.update(stagenames)
1989 1989 else:
1990 1990 for n in opts['show_stage']:
1991 1991 if n not in stagenames:
1992 1992 raise error.Abort(_('invalid stage name: %s') % n)
1993 1993 showalways.update(opts['show_stage'])
1994 1994
1995 1995 treebystage = {}
1996 1996 printedtree = None
1997 1997 tree = revsetlang.parse(expr, lookup=repo.__contains__)
1998 1998 for n, f in stages:
1999 1999 treebystage[n] = tree = f(tree)
2000 2000 if n in showalways or (n in showchanged and tree != printedtree):
2001 2001 if opts['show_stage'] or n != 'parsed':
2002 2002 ui.write(("* %s:\n") % n)
2003 2003 ui.write(revsetlang.prettyformat(tree), "\n")
2004 2004 printedtree = tree
2005 2005
2006 2006 if opts['verify_optimized']:
2007 2007 arevs = revset.makematcher(treebystage['analyzed'])(repo)
2008 2008 brevs = revset.makematcher(treebystage['optimized'])(repo)
2009 2009 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2010 2010 ui.write(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
2011 2011 ui.write(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
2012 2012 arevs = list(arevs)
2013 2013 brevs = list(brevs)
2014 2014 if arevs == brevs:
2015 2015 return 0
2016 2016 ui.write(('--- analyzed\n'), label='diff.file_a')
2017 2017 ui.write(('+++ optimized\n'), label='diff.file_b')
2018 2018 sm = difflib.SequenceMatcher(None, arevs, brevs)
2019 2019 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
2020 2020 if tag in ('delete', 'replace'):
2021 2021 for c in arevs[alo:ahi]:
2022 2022 ui.write('-%s\n' % c, label='diff.deleted')
2023 2023 if tag in ('insert', 'replace'):
2024 2024 for c in brevs[blo:bhi]:
2025 2025 ui.write('+%s\n' % c, label='diff.inserted')
2026 2026 if tag == 'equal':
2027 2027 for c in arevs[alo:ahi]:
2028 2028 ui.write(' %s\n' % c)
2029 2029 return 1
2030 2030
2031 2031 func = revset.makematcher(tree)
2032 2032 revs = func(repo)
2033 2033 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2034 2034 ui.write(("* set:\n"), smartset.prettyformat(revs), "\n")
2035 2035 if not opts['show_revs']:
2036 2036 return
2037 2037 for c in revs:
2038 2038 ui.write("%s\n" % c)
2039 2039
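# A condensed sketch (hypothetical helper, not used by the command) of the
# full stage pipeline above: each stage is a pure tree -> tree function,
# so "-p all" is equivalent to folding them in order.
def _runallstages(ui, repo, expr):
    tree = revsetlang.parse(expr, lookup=repo.__contains__)
    tree = revsetlang.expandaliases(ui, tree)
    tree = revsetlang.foldconcat(tree)
    tree = revsetlang.analyze(tree)
    tree = revsetlang.optimize(tree)
    return revset.makematcher(tree)(repo)
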
2040 2040 @command('debugsetparents', [], _('REV1 [REV2]'))
2041 2041 def debugsetparents(ui, repo, rev1, rev2=None):
2042 2042 """manually set the parents of the current working directory
2043 2043
2044 2044 This is useful for writing repository conversion tools, but should
2045 2045 be used with care. For example, neither the working directory nor the
2046 2046 dirstate is updated, so file status may be incorrect after running this
2047 2047 command.
2048 2048
2049 2049 Returns 0 on success.
2050 2050 """
2051 2051
2052 2052 r1 = scmutil.revsingle(repo, rev1).node()
2053 2053 r2 = scmutil.revsingle(repo, rev2, 'null').node()
2054 2054
2055 2055 with repo.wlock():
2056 2056 repo.setparents(r1, r2)
2057 2057
2058 2058 @command('debugsub',
2059 2059 [('r', 'rev', '',
2060 2060 _('revision to check'), _('REV'))],
2061 2061 _('[-r REV] [REV]'))
2062 2062 def debugsub(ui, repo, rev=None):
2063 2063 ctx = scmutil.revsingle(repo, rev, None)
2064 2064 for k, v in sorted(ctx.substate.items()):
2065 2065 ui.write(('path %s\n') % k)
2066 2066 ui.write((' source %s\n') % v[0])
2067 2067 ui.write((' revision %s\n') % v[1])
2068 2068
2069 2069 @command('debugsuccessorssets',
2070 2070 [],
2071 2071 _('[REV]'))
2072 2072 def debugsuccessorssets(ui, repo, *revs):
2073 2073 """show set of successors for revision
2074 2074
2075 2075 A successors set of changeset A is a consistent group of revisions that
2076 2076 succeed A. It contains non-obsolete changesets only.
2077 2077
2078 2078 In most cases a changeset A has a single successors set containing a single
2079 2079 successor (changeset A replaced by A').
2080 2080
2081 2081 A changeset that is made obsolete with no successors is called "pruned".
2082 2082 Such changesets have no successors sets at all.
2083 2083
2084 2084 A changeset that has been "split" will have a successors set containing
2085 2085 more than one successor.
2086 2086
2087 2087 A changeset that has been rewritten in multiple different ways is called
2088 2088 "divergent". Such changesets have multiple successor sets (each of which
2089 2089 may also be split, i.e. have multiple successors).
2090 2090
2091 2091 Results are displayed as follows::
2092 2092
2093 2093 <rev1>
2094 2094 <successors-1A>
2095 2095 <rev2>
2096 2096 <successors-2A>
2097 2097 <successors-2B1> <successors-2B2> <successors-2B3>
2098 2098
2099 2099 Here rev2 has two possible (i.e. divergent) successors sets. The first
2100 2100 holds one element, whereas the second holds three (i.e. the changeset has
2101 2101 been split).
2102 2102 """
2103 2103 # passed to successorssets caching computation from one call to another
2104 2104 cache = {}
2105 2105 ctx2str = str
2106 2106 node2str = short
2107 2107 if ui.debug():
2108 2108 def ctx2str(ctx):
2109 2109 return ctx.hex()
2110 2110 node2str = hex
2111 2111 for rev in scmutil.revrange(repo, revs):
2112 2112 ctx = repo[rev]
2113 2113 ui.write('%s\n' % ctx2str(ctx))
2114 2114 for succsset in obsutil.successorssets(repo, ctx.node(), cache):
2115 2115 if succsset:
2116 2116 ui.write(' ')
2117 2117 ui.write(node2str(succsset[0]))
2118 2118 for node in succsset[1:]:
2119 2119 ui.write(' ')
2120 2120 ui.write(node2str(node))
2121 2121 ui.write('\n')
2122 2122
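# A tiny wrapper (hypothetical, for illustration) showing the API behind
# the output above: an empty list means pruned, one list per successors
# set, and multiple lists mean divergence.
def _successorssetsof(repo, node):
    return obsutil.successorssets(repo, node, {})
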
2123 2123 @command('debugtemplate',
2124 2124 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2125 2125 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2126 2126 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2127 2127 optionalrepo=True)
2128 2128 def debugtemplate(ui, repo, tmpl, **opts):
2129 2129 """parse and apply a template
2130 2130
2131 2131 If -r/--rev is given, the template is processed as a log template and
2132 2132 applied to the given changesets. Otherwise, it is processed as a generic
2133 2133 template.
2134 2134
2135 2135 Use --verbose to print the parsed tree.
2136 2136 """
2137 2137 revs = None
2138 2138 if opts[r'rev']:
2139 2139 if repo is None:
2140 2140 raise error.RepoError(_('there is no Mercurial repository here '
2141 2141 '(.hg not found)'))
2142 2142 revs = scmutil.revrange(repo, opts[r'rev'])
2143 2143
2144 2144 props = {}
2145 2145 for d in opts[r'define']:
2146 2146 try:
2147 2147 k, v = (e.strip() for e in d.split('=', 1))
2148 2148 if not k or k == 'ui':
2149 2149 raise ValueError
2150 2150 props[k] = v
2151 2151 except ValueError:
2152 2152 raise error.Abort(_('malformed keyword definition: %s') % d)
2153 2153
2154 2154 if ui.verbose:
2155 2155 aliases = ui.configitems('templatealias')
2156 2156 tree = templater.parse(tmpl)
2157 2157 ui.note(templater.prettyformat(tree), '\n')
2158 2158 newtree = templater.expandaliases(tree, aliases)
2159 2159 if newtree != tree:
2160 2160 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2161 2161
2162 2162 if revs is None:
2163 2163 t = formatter.maketemplater(ui, tmpl)
2164 2164 props['ui'] = ui
2165 2165 ui.write(t.render(props))
2166 2166 else:
2167 2167 displayer = cmdutil.makelogtemplater(ui, repo, tmpl)
2168 2168 for r in revs:
2169 2169 displayer.show(repo[r], **pycompat.strkwargs(props))
2170 2170 displayer.close()
2171 2171
2172 2172 @command('debugupdatecaches', [])
2173 2173 def debugupdatecaches(ui, repo, *pats, **opts):
2174 2174 """warm all known caches in the repository"""
2175 2175 with repo.wlock():
2176 2176 with repo.lock():
2177 2177 repo.updatecaches()
2178 2178
2179 2179 @command('debugupgraderepo', [
2180 2180 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2181 2181 ('', 'run', False, _('performs an upgrade')),
2182 2182 ])
2183 2183 def debugupgraderepo(ui, repo, run=False, optimize=None):
2184 2184 """upgrade a repository to use different features
2185 2185
2186 2186 If no arguments are specified, the repository is evaluated for upgrade
2187 2187 and a list of problems and potential optimizations is printed.
2188 2188
2189 2189 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2190 2190 can be influenced via additional arguments. More details will be provided
2191 2191 by the command output when run without ``--run``.
2192 2192
2193 2193 During the upgrade, the repository will be locked and no writes will be
2194 2194 allowed.
2195 2195
2196 2196 At the end of the upgrade, the repository may not be readable while new
2197 2197 repository data is swapped in. This window will be as long as it takes to
2198 2198 rename some directories inside the ``.hg`` directory. On most machines, this
2199 2199 should complete almost instantaneously and the chances of a consumer being
2200 2200 unable to access the repository should be low.
2201 2201 """
2202 2202 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2203 2203
2204 2204 @command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
2205 2205 inferrepo=True)
2206 2206 def debugwalk(ui, repo, *pats, **opts):
2207 2207 """show how files match on given patterns"""
2208 2208 opts = pycompat.byteskwargs(opts)
2209 2209 m = scmutil.match(repo[None], pats, opts)
2210 2210 ui.write(('matcher: %r\n') % m)
2211 2211 items = list(repo[None].walk(m))
2212 2212 if not items:
2213 2213 return
2214 2214 f = lambda fn: fn
2215 2215 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2216 2216 f = lambda fn: util.normpath(fn)
2217 2217 fmt = 'f %%-%ds %%-%ds %%s' % (
2218 2218 max([len(abs) for abs in items]),
2219 2219 max([len(m.rel(abs)) for abs in items]))
2220 2220 for abs in items:
2221 2221 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2222 2222 ui.write("%s\n" % line.rstrip())
2223 2223
2224 2224 @command('debugwireargs',
2225 2225 [('', 'three', '', 'three'),
2226 2226 ('', 'four', '', 'four'),
2227 2227 ('', 'five', '', 'five'),
2228 2228 ] + cmdutil.remoteopts,
2229 2229 _('REPO [OPTIONS]... [ONE [TWO]]'),
2230 2230 norepo=True)
2231 2231 def debugwireargs(ui, repopath, *vals, **opts):
2232 2232 opts = pycompat.byteskwargs(opts)
2233 2233 repo = hg.peer(ui, opts, repopath)
2234 2234 for opt in cmdutil.remoteopts:
2235 2235 del opts[opt[1]]
2236 2236 args = {}
2237 2237 for k, v in opts.iteritems():
2238 2238 if v:
2239 2239 args[k] = v
2240 2240 # run twice to check that we don't mess up the stream for the next command
2241 2241 res1 = repo.debugwireargs(*vals, **args)
2242 2242 res2 = repo.debugwireargs(*vals, **args)
2243 2243 ui.write("%s\n" % res1)
2244 2244 if res1 != res2:
2245 2245 ui.warn("%s\n" % res2)
@@ -1,1078 +1,1041 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 build new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that use changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. The marker format depends on the
67 67 version. See the comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84
85 85 parsers = policy.importmod(r'parsers')
86 86
87 87 _pack = struct.pack
88 88 _unpack = struct.unpack
89 89 _calcsize = struct.calcsize
90 90 propertycache = util.propertycache
91 91
92 92 # the obsolete feature is not mature enough to be enabled by default.
93 93 # you have to rely on a third party extension to enable this.
94 94 _enabled = False
95 95
96 96 # Options for obsolescence
97 97 createmarkersopt = 'createmarkers'
98 98 allowunstableopt = 'allowunstable'
99 99 exchangeopt = 'exchange'
100 100
101 101 def isenabled(repo, option):
102 102 """Returns True if the given repository has the given obsolete option
103 103 enabled.
104 104 """
105 105 result = set(repo.ui.configlist('experimental', 'evolution'))
106 106 if 'all' in result:
107 107 return True
108 108
109 109 # For migration purposes, temporarily return true if the config hasn't been
110 110 # set but _enabled is true.
111 111 if len(result) == 0 and _enabled:
112 112 return True
113 113
114 114 # createmarkers must be enabled if other options are enabled
115 115 if ((allowunstableopt in result or exchangeopt in result) and
116 116 createmarkersopt not in result):
117 117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
118 118 "if other obsolete options are enabled"))
119 119
120 120 return option in result
121 121
122 122 ### obsolescence marker flag
123 123
124 124 ## bumpedfix flag
125 125 #
126 126 # When a changeset A' succeeds a changeset A which became public, we call A'
127 127 # "bumped" because it's a successor of a public changeset
128 128 #
129 129 # o A' (bumped)
130 130 # |`:
131 131 # | o A
132 132 # |/
133 133 # o Z
134 134 #
135 135 # The way to solve this situation is to create a new changeset Ad as a child
136 136 # of A. This changeset has the same content as A'. So the diff from A to A'
137 137 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
138 138 #
139 139 # o Ad
140 140 # |`:
141 141 # | x A'
142 142 # |'|
143 143 # o | A
144 144 # |/
145 145 # o Z
146 146 #
147 147 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
148 148 # as bumped too, we add the `bumpedfix` flag to the marker: <A', (Ad,)>.
149 149 # This flag means that the successors express the changes between the public and
150 150 # bumped version and fix the situation, breaking the transitivity of
151 151 # "bumped" here.
152 152 bumpedfix = 1
153 153 usingsha256 = 2
154 154
155 155 ## Parsing and writing of version "0"
156 156 #
157 157 # The header is followed by the markers. Each marker is made of:
158 158 #
159 159 # - 1 uint8 : number of new changesets "N", can be zero.
160 160 #
161 161 # - 1 uint32: metadata size "M" in bytes.
162 162 #
163 163 # - 1 byte: a bit field. It is reserved for flags used in common
164 164 # obsolete marker operations, to avoid repeated decoding of metadata
165 165 # entries.
166 166 #
167 167 # - 20 bytes: obsoleted changeset identifier.
168 168 #
169 169 # - N*20 bytes: new changesets identifiers.
170 170 #
171 171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 172 # string contains a key and a value, separated by a colon ':', without
173 173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 174 # cannot contain '\0'.
175 175 _fm0version = 0
176 176 _fm0fixed = '>BIB20s'
177 177 _fm0node = '20s'
178 178 _fm0fsize = _calcsize(_fm0fixed)
179 179 _fm0fnodesize = _calcsize(_fm0node)
180 180
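# Size sketch for the version-0 layout above: '>BIB20s' is big-endian and
# unpadded, so the fixed part is 1 + 4 + 1 + 20 = 26 bytes, followed by
# N successor node ids of 20 bytes each and M bytes of metadata.
assert _fm0fsize == 26
assert _fm0fnodesize == 20
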
181 181 def _fm0readmarkers(data, off):
182 182 # Loop on markers
183 183 l = len(data)
184 184 while off + _fm0fsize <= l:
185 185 # read fixed part
186 186 cur = data[off:off + _fm0fsize]
187 187 off += _fm0fsize
188 188 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
189 189 # read replacement
190 190 sucs = ()
191 191 if numsuc:
192 192 s = (_fm0fnodesize * numsuc)
193 193 cur = data[off:off + s]
194 194 sucs = _unpack(_fm0node * numsuc, cur)
195 195 off += s
196 196 # read metadata
197 197 # (metadata will be decoded on demand)
198 198 metadata = data[off:off + mdsize]
199 199 if len(metadata) != mdsize:
200 200 raise error.Abort(_('parsing obsolete marker: metadata is too '
201 201 'short, %d bytes expected, got %d')
202 202 % (mdsize, len(metadata)))
203 203 off += mdsize
204 204 metadata = _fm0decodemeta(metadata)
205 205 try:
206 206 when, offset = metadata.pop('date', '0 0').split(' ')
207 207 date = float(when), int(offset)
208 208 except ValueError:
209 209 date = (0., 0)
210 210 parents = None
211 211 if 'p2' in metadata:
212 212 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
213 213 elif 'p1' in metadata:
214 214 parents = (metadata.pop('p1', None),)
215 215 elif 'p0' in metadata:
216 216 parents = ()
217 217 if parents is not None:
218 218 try:
219 219 parents = tuple(node.bin(p) for p in parents)
220 220 # if parent content is not a nodeid, drop the data
221 221 for p in parents:
222 222 if len(p) != 20:
223 223 parents = None
224 224 break
225 225 except TypeError:
226 226 # if content cannot be translated to nodeid drop the data.
227 227 parents = None
228 228
229 229 metadata = tuple(sorted(metadata.iteritems()))
230 230
231 231 yield (pre, sucs, flags, metadata, date, parents)
232 232
233 233 def _fm0encodeonemarker(marker):
234 234 pre, sucs, flags, metadata, date, parents = marker
235 235 if flags & usingsha256:
236 236 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
237 237 metadata = dict(metadata)
238 238 time, tz = date
239 239 metadata['date'] = '%r %i' % (time, tz)
240 240 if parents is not None:
241 241 if not parents:
242 242 # mark that we explicitly recorded no parents
243 243 metadata['p0'] = ''
244 244 for i, p in enumerate(parents, 1):
245 245 metadata['p%i' % i] = node.hex(p)
246 246 metadata = _fm0encodemeta(metadata)
247 247 numsuc = len(sucs)
248 248 format = _fm0fixed + (_fm0node * numsuc)
249 249 data = [numsuc, len(metadata), flags, pre]
250 250 data.extend(sucs)
251 251 return _pack(format, *data) + metadata
252 252
253 253 def _fm0encodemeta(meta):
254 254 """Return encoded metadata string to string mapping.
255 255
256 256 Assume no ':' in key and no '\0' in both key and value."""
257 257 for key, value in meta.iteritems():
258 258 if ':' in key or '\0' in key:
259 259 raise ValueError("':' and '\0' are forbidden in metadata keys")
260 260 if '\0' in value:
261 261 raise ValueError("'\0' is forbidden in metadata values")
262 262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
263 263
264 264 def _fm0decodemeta(data):
265 265 """Return string to string dictionary from encoded version."""
266 266 d = {}
267 267 for l in data.split('\0'):
268 268 if l:
269 269 key, value = l.split(':')
270 270 d[key] = value
271 271 return d
272 272
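# Round-trip sketch for the two helpers above. Note the asymmetry: the
# encoder only forbids ':' in keys, but the decoder splits each entry on
# ':' once, so values must avoid ':' as well for a clean round trip.
_meta = {'user': 'alice', 'operation': 'amend'}
assert _fm0decodemeta(_fm0encodemeta(_meta)) == _meta
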
273 273 ## Parsing and writing of version "1"
274 274 #
275 275 # The header is followed by the markers. Each marker is made of:
276 276 #
277 277 # - uint32: total size of the marker (including this field)
278 278 #
279 279 # - float64: date in seconds since epoch
280 280 #
281 281 # - int16: timezone offset in minutes
282 282 #
283 283 # - uint16: a bit field. It is reserved for flags used in common
284 284 # obsolete marker operations, to avoid repeated decoding of metadata
285 285 # entries.
286 286 #
287 287 # - uint8: number of successors "N", can be zero.
288 288 #
289 289 # - uint8: number of parents "P", can be zero.
290 290 #
291 291 # 0: parents data stored but no parent,
292 292 # 1: one parent stored,
293 293 # 2: two parents stored,
294 294 # 3: no parent data stored
295 295 #
296 296 # - uint8: number of metadata entries M
297 297 #
298 298 # - 20 or 32 bytes: precursor changeset identifier.
299 299 #
300 300 # - N*(20 or 32) bytes: successors changesets identifiers.
301 301 #
302 302 # - P*(20 or 32) bytes: parents of the precursors changesets.
303 303 #
304 304 # - M*(uint8, uint8): size of all metadata entries (key and value)
305 305 #
306 306 # - remaining bytes: the metadata, each (key, value) pair after the other.
307 307 _fm1version = 1
308 308 _fm1fixed = '>IdhHBBB20s'
309 309 _fm1nodesha1 = '20s'
310 310 _fm1nodesha256 = '32s'
311 311 _fm1nodesha1size = _calcsize(_fm1nodesha1)
312 312 _fm1nodesha256size = _calcsize(_fm1nodesha256)
313 313 _fm1fsize = _calcsize(_fm1fixed)
314 314 _fm1parentnone = 3
315 315 _fm1parentshift = 14
316 316 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
317 317 _fm1metapair = 'BB'
318 318 _fm1metapairsize = _calcsize('BB')
319 319
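# Size sketch for the version-1 layout above: '>IdhHBBB20s' is big-endian
# and unpadded, so the fixed part is 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20 = 39
# bytes; each metadata entry then costs a 2-byte (uint8, uint8) length
# pair plus the key and value bytes themselves.
assert _fm1fsize == 39
assert _fm1metapairsize == 2
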
320 320 def _fm1purereadmarkers(data, off):
321 321 # make some global constants local for performance
322 322 noneflag = _fm1parentnone
323 323 sha2flag = usingsha256
324 324 sha1size = _fm1nodesha1size
325 325 sha2size = _fm1nodesha256size
326 326 sha1fmt = _fm1nodesha1
327 327 sha2fmt = _fm1nodesha256
328 328 metasize = _fm1metapairsize
329 329 metafmt = _fm1metapair
330 330 fsize = _fm1fsize
331 331 unpack = _unpack
332 332
333 333 # Loop on markers
334 334 stop = len(data) - _fm1fsize
335 335 ufixed = struct.Struct(_fm1fixed).unpack
336 336
337 337 while off <= stop:
338 338 # read fixed part
339 339 o1 = off + fsize
340 340 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
341 341
342 342 if flags & sha2flag:
343 343 # FIXME: prec was read as a SHA1, needs to be amended
344 344
345 345 # read 0 or more successors
346 346 if numsuc == 1:
347 347 o2 = o1 + sha2size
348 348 sucs = (data[o1:o2],)
349 349 else:
350 350 o2 = o1 + sha2size * numsuc
351 351 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
352 352
353 353 # read parents
354 354 if numpar == noneflag:
355 355 o3 = o2
356 356 parents = None
357 357 elif numpar == 1:
358 358 o3 = o2 + sha2size
359 359 parents = (data[o2:o3],)
360 360 else:
361 361 o3 = o2 + sha2size * numpar
362 362 parents = unpack(sha2fmt * numpar, data[o2:o3])
363 363 else:
364 364 # read 0 or more successors
365 365 if numsuc == 1:
366 366 o2 = o1 + sha1size
367 367 sucs = (data[o1:o2],)
368 368 else:
369 369 o2 = o1 + sha1size * numsuc
370 370 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
371 371
372 372 # read parents
373 373 if numpar == noneflag:
374 374 o3 = o2
375 375 parents = None
376 376 elif numpar == 1:
377 377 o3 = o2 + sha1size
378 378 parents = (data[o2:o3],)
379 379 else:
380 380 o3 = o2 + sha1size * numpar
381 381 parents = unpack(sha1fmt * numpar, data[o2:o3])
382 382
383 383 # read metadata
384 384 off = o3 + metasize * nummeta
385 385 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
386 386 metadata = []
387 387 for idx in xrange(0, len(metapairsize), 2):
388 388 o1 = off + metapairsize[idx]
389 389 o2 = o1 + metapairsize[idx + 1]
390 390 metadata.append((data[off:o1], data[o1:o2]))
391 391 off = o2
392 392
393 393 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
394 394
395 395 def _fm1encodeonemarker(marker):
396 396 pre, sucs, flags, metadata, date, parents = marker
397 397 # determine node size
398 398 _fm1node = _fm1nodesha1
399 399 if flags & usingsha256:
400 400 _fm1node = _fm1nodesha256
401 401 numsuc = len(sucs)
402 402 numextranodes = numsuc
403 403 if parents is None:
404 404 numpar = _fm1parentnone
405 405 else:
406 406 numpar = len(parents)
407 407 numextranodes += numpar
408 408 formatnodes = _fm1node * numextranodes
409 409 formatmeta = _fm1metapair * len(metadata)
410 410 format = _fm1fixed + formatnodes + formatmeta
411 411 # tz is stored in minutes so we divide by 60
412 412 tz = date[1]//60
413 413 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
414 414 data.extend(sucs)
415 415 if parents is not None:
416 416 data.extend(parents)
417 417 totalsize = _calcsize(format)
418 418 for key, value in metadata:
419 419 lk = len(key)
420 420 lv = len(value)
421 421 data.append(lk)
422 422 data.append(lv)
423 423 totalsize += lk + lv
424 424 data[0] = totalsize
425 425 data = [_pack(format, *data)]
426 426 for key, value in metadata:
427 427 data.append(key)
428 428 data.append(value)
429 429 return ''.join(data)
430 430
431 431 def _fm1readmarkers(data, off):
432 432 native = getattr(parsers, 'fm1readmarkers', None)
433 433 if not native:
434 434 return _fm1purereadmarkers(data, off)
435 435 stop = len(data) - _fm1fsize
436 436 return native(data, off, stop)
437 437
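# Round-trip sketch for format 1 (the node ids are hypothetical 20-byte
# strings; the timezone offset is stored in minutes, so it has to be a
# multiple of 60 seconds to survive encoding unchanged):
_m1sketch = ('\x11' * 20, ('\x22' * 20,), 0, (('user', 'alice'),),
             (0.0, 0), None)
_roundtrip = list(_fm1purereadmarkers(_fm1encodeonemarker(_m1sketch), 0))
assert _roundtrip == [_m1sketch]
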
438 438 # mapping to read/write various marker formats
439 439 # <version> -> (decoder, encoder)
440 440 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
441 441 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
442 442
443 443 def _readmarkerversion(data):
444 444 return _unpack('>B', data[0:1])[0]
445 445
446 446 @util.nogc
447 447 def _readmarkers(data):
448 448 """Read and enumerate markers from raw data"""
449 449 diskversion = _readmarkerversion(data)
450 450 off = 1
451 451 if diskversion not in formats:
452 452 msg = _('parsing obsolete marker: unknown version %r') % diskversion
453 453 raise error.UnknownVersion(msg, version=diskversion)
454 454 return diskversion, formats[diskversion][0](data, off)
455 455
456 456 def encodeheader(version=_fm0version):
457 457 return _pack('>B', version)
458 458
459 459 def encodemarkers(markers, addheader=False, version=_fm0version):
460 460 # Kept separate from flushmarkers(), it will be reused for
461 461 # markers exchange.
462 462 encodeone = formats[version][1]
463 463 if addheader:
464 464 yield encodeheader(version)
465 465 for marker in markers:
466 466 yield encodeone(marker)
467 467
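# Stream sketch: encodemarkers() with addheader=True yields chunks whose
# concatenation _readmarkers() accepts again (same kind of hypothetical
# marker tuple as above, here with empty metadata):
_msketch = ('\x11' * 20, ('\x22' * 20,), 0, (), (0.0, 0), None)
_stream = ''.join(encodemarkers([_msketch], addheader=True,
                                version=_fm1version))
_ver, _decoded = _readmarkers(_stream)
assert _ver == _fm1version and list(_decoded) == [_msketch]
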
468
469 class marker(object):
470 """Wrap obsolete marker raw data"""
471
472 def __init__(self, repo, data):
473 # the repo argument will be used to create changectx in later version
474 self._repo = repo
475 self._data = data
476 self._decodedmeta = None
477
478 def __hash__(self):
479 return hash(self._data)
480
481 def __eq__(self, other):
482 if type(other) != type(self):
483 return False
484 return self._data == other._data
485
486 def precnode(self):
487 """Precursor changeset node identifier"""
488 return self._data[0]
489
490 def succnodes(self):
491 """List of successor changesets node identifiers"""
492 return self._data[1]
493
494 def parentnodes(self):
495 """Parents of the precursors (None if not recorded)"""
496 return self._data[5]
497
498 def metadata(self):
499 """Decoded metadata dictionary"""
500 return dict(self._data[3])
501
502 def date(self):
503 """Creation date as (unixtime, offset)"""
504 return self._data[4]
505
506 def flags(self):
507 """The flags field of the marker"""
508 return self._data[2]
509
510 468 @util.nogc
511 469 def _addsuccessors(successors, markers):
512 470 for mark in markers:
513 471 successors.setdefault(mark[0], set()).add(mark)
514 472
515 473 @util.nogc
516 474 def _addprecursors(precursors, markers):
517 475 for mark in markers:
518 476 for suc in mark[1]:
519 477 precursors.setdefault(suc, set()).add(mark)
520 478
521 479 @util.nogc
522 480 def _addchildren(children, markers):
523 481 for mark in markers:
524 482 parents = mark[5]
525 483 if parents is not None:
526 484 for p in parents:
527 485 children.setdefault(p, set()).add(mark)
528 486
529 487 def _checkinvalidmarkers(markers):
530 488 """search for marker with invalid data and raise error if needed
531 489
532 490 Exist as a separated function to allow the evolve extension for a more
533 491 subtle handling.
534 492 """
535 493 for mark in markers:
536 494 if node.nullid in mark[1]:
537 495 raise error.Abort(_('bad obsolescence marker detected: '
538 496 'invalid successors nullid'))
539 497
540 498 class obsstore(object):
541 499 """Store obsolete markers
542 500
543 501 Markers can be accessed with three mappings:
544 502 - precursors[x] -> set(markers on precursors edges of x)
545 503 - successors[x] -> set(markers on successors edges of x)
546 504 - children[x] -> set(markers on precursors edges of children(x))
547 505 """
548 506
549 507 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
550 508 # prec: nodeid, precursor changesets
551 509 # succs: tuple of nodeid, successor changesets (0-N length)
552 510 # flag: integer, flag field carrying modifier for the markers (see doc)
553 511 # meta: binary blob, encoded metadata dictionary
554 512 # date: (float, int) tuple, date of marker creation
555 513 # parents: (tuple of nodeid) or None, parents of precursors
556 514 # None is used when no data has been recorded
557 515
558 516 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
559 517 # caches for various obsolescence related data
560 518 self.caches = {}
561 519 self.svfs = svfs
562 520 self._defaultformat = defaultformat
563 521 self._readonly = readonly
564 522
565 523 def __iter__(self):
566 524 return iter(self._all)
567 525
568 526 def __len__(self):
569 527 return len(self._all)
570 528
571 529 def __nonzero__(self):
572 530 if not self._cached('_all'):
573 531 try:
574 532 return self.svfs.stat('obsstore').st_size > 1
575 533 except OSError as inst:
576 534 if inst.errno != errno.ENOENT:
577 535 raise
578 536 # just build an empty _all list if no obsstore exists, which
579 537 # avoids further stat() syscalls
580 538 pass
581 539 return bool(self._all)
582 540
583 541 __bool__ = __nonzero__
584 542
585 543 @property
586 544 def readonly(self):
587 545 """True if marker creation is disabled
588 546
589 547 Remove me in the future when obsolete markers are always on."""
590 548 return self._readonly
591 549
592 550 def create(self, transaction, prec, succs=(), flag=0, parents=None,
593 551 date=None, metadata=None, ui=None):
594 552 """obsolete: add a new obsolete marker
595 553
596 554 * ensure it is hashable
597 555 * check mandatory metadata
598 556 * encode metadata
599 557
600 558 If you are a human writing code that creates markers, you want to use
601 559 the `createmarkers` function in this module instead.
602 560
603 561 Return True if a new marker has been added, False if the marker
604 562 already existed (no-op).
605 563 """
606 564 if metadata is None:
607 565 metadata = {}
608 566 if date is None:
609 567 if 'date' in metadata:
610 568 # as a courtesy for out-of-tree extensions
611 569 date = util.parsedate(metadata.pop('date'))
612 570 elif ui is not None:
613 571 date = ui.configdate('devel', 'default-date')
614 572 if date is None:
615 573 date = util.makedate()
616 574 else:
617 575 date = util.makedate()
618 576 if len(prec) != 20:
619 577 raise ValueError(prec)
620 578 for succ in succs:
621 579 if len(succ) != 20:
622 580 raise ValueError(succ)
623 581 if prec in succs:
624 582 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
625 583
626 584 metadata = tuple(sorted(metadata.iteritems()))
627 585
628 586 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
629 587 return bool(self.add(transaction, [marker]))
630 588
631 589 def add(self, transaction, markers):
632 590 """Add new markers to the store
633 591
634 592 Take care of filtering out duplicates.
635 593 Return the number of new markers."""
636 594 if self._readonly:
637 595 raise error.Abort(_('creating obsolete markers is not enabled on '
638 596 'this repo'))
639 597 known = set()
640 598 getsuccessors = self.successors.get
641 599 new = []
642 600 for m in markers:
643 601 if m not in getsuccessors(m[0], ()) and m not in known:
644 602 known.add(m)
645 603 new.append(m)
646 604 if new:
647 605 f = self.svfs('obsstore', 'ab')
648 606 try:
649 607 offset = f.tell()
650 608 transaction.add('obsstore', offset)
651 609 # offset == 0: new file - add the version header
652 610 for bytes in encodemarkers(new, offset == 0, self._version):
653 611 f.write(bytes)
654 612 finally:
655 613 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
656 614 # call 'filecacheentry.refresh()' here
657 615 f.close()
658 616 self._addmarkers(new)
659 617 # new markers *may* have changed several sets. invalidate the caches.
660 618 self.caches.clear()
661 619 # records the number of new markers for the transaction hooks
662 620 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
663 621 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
664 622 return len(new)
665 623
666 624 def mergemarkers(self, transaction, data):
667 625 """merge a binary stream of markers inside the obsstore
668 626
669 627 Returns the number of new markers added."""
670 628 version, markers = _readmarkers(data)
671 629 return self.add(transaction, markers)
672 630
673 631 @propertycache
674 632 def _data(self):
675 633 return self.svfs.tryread('obsstore')
676 634
677 635 @propertycache
678 636 def _version(self):
679 637 if len(self._data) >= 1:
680 638 return _readmarkerversion(self._data)
681 639 else:
682 640 return self._defaultformat
683 641
684 642 @propertycache
685 643 def _all(self):
686 644 data = self._data
687 645 if not data:
688 646 return []
689 647 self._version, markers = _readmarkers(data)
690 648 markers = list(markers)
691 649 _checkinvalidmarkers(markers)
692 650 return markers
693 651
694 652 @propertycache
695 653 def successors(self):
696 654 successors = {}
697 655 _addsuccessors(successors, self._all)
698 656 return successors
699 657
700 658 @propertycache
701 659 def precursors(self):
702 660 precursors = {}
703 661 _addprecursors(precursors, self._all)
704 662 return precursors
705 663
706 664 @propertycache
707 665 def children(self):
708 666 children = {}
709 667 _addchildren(children, self._all)
710 668 return children
711 669
712 670 def _cached(self, attr):
713 671 return attr in self.__dict__
714 672
715 673 def _addmarkers(self, markers):
716 674 markers = list(markers) # to allow repeated iteration
717 675 self._all.extend(markers)
718 676 if self._cached('successors'):
719 677 _addsuccessors(self.successors, markers)
720 678 if self._cached('precursors'):
721 679 _addprecursors(self.precursors, markers)
722 680 if self._cached('children'):
723 681 _addchildren(self.children, markers)
724 682 _checkinvalidmarkers(markers)
725 683
726 684 def relevantmarkers(self, nodes):
727 685 """return a set of all obsolescence markers relevant to a set of nodes.
728 686
729 687 "relevant" to a set of nodes mean:
730 688
731 689 - marker that use this changeset as successor
732 690 - prune marker of direct children on this changeset
733 691 - recursive application of the two rules on precursors of these markers
734 692
735 693 It is a set so you cannot rely on order."""
736 694
737 695 pendingnodes = set(nodes)
738 696 seenmarkers = set()
739 697 seennodes = set(pendingnodes)
740 698 precursorsmarkers = self.precursors
741 699 succsmarkers = self.successors
742 700 children = self.children
743 701 while pendingnodes:
744 702 direct = set()
745 703 for current in pendingnodes:
746 704 direct.update(precursorsmarkers.get(current, ()))
747 705 pruned = [m for m in children.get(current, ()) if not m[1]]
748 706 direct.update(pruned)
749 707 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
750 708 direct.update(pruned)
751 709 direct -= seenmarkers
752 710 pendingnodes = set([m[0] for m in direct])
753 711 seenmarkers |= direct
754 712 pendingnodes -= seennodes
755 713 seennodes |= pendingnodes
756 714 return seenmarkers
757 715
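# Walk sketch for relevantmarkers() above (hypothetical 20-byte ids): with
# markers (a, (b,)) and (b, (c,)) recorded, relevantmarkers({c}) follows
# the precursor chain transitively and returns both markers.
_a, _b, _c = '\x0a' * 20, '\x0b' * 20, '\x0c' * 20
_mab = (_a, (_b,), 0, (), (0.0, 0), None)
_mbc = (_b, (_c,), 0, (), (0.0, 0), None)
_precursors = {}
_addprecursors(_precursors, [_mab, _mbc])
# precursor edges now read c -> {_mbc} and b -> {_mab}; the pending-nodes
# loop therefore visits c, then b, accumulating both markers.
assert _precursors[_c] == set([_mbc])
assert _precursors[_b] == set([_mab])
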
758 716 def makestore(ui, repo):
759 717 """Create an obsstore instance from a repo."""
760 718 # read default format for new obsstore.
761 719 # developer config: format.obsstore-version
762 720 defaultformat = ui.configint('format', 'obsstore-version', None)
763 721 # rely on obsstore class default when possible.
764 722 kwargs = {}
765 723 if defaultformat is not None:
766 724 kwargs['defaultformat'] = defaultformat
767 725 readonly = not isenabled(repo, createmarkersopt)
768 726 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
769 727 if store and readonly:
770 728 ui.warn(_('obsolete feature not enabled but %i markers found!\n')
771 729 % len(list(store)))
772 730 return store
773 731
774 732 def commonversion(versions):
775 733 """Return the newest version listed in both versions and our local formats.
776 734
777 735 Returns None if no common version exists.
778 736 """
779 737 versions.sort(reverse=True)
781 739 # search for the highest version known on both sides
781 739 for v in versions:
782 740 if v in formats:
783 741 return v
784 742 return None
785 743
786 744 # arbitrarily picked to fit into the 8K limit from HTTP servers
787 745 # you have to take into account:
788 746 # - the version header
789 747 # - the base85 encoding
790 748 _maxpayload = 5300
791 749
792 750 def _pushkeyescape(markers):
793 751 """encode markers into a dict suitable for pushkey exchange
794 752
795 753 - binary data is base85 encoded
796 754 - split into chunks smaller than 5300 bytes"""
797 755 keys = {}
798 756 parts = []
799 757 currentlen = _maxpayload * 2 # ensure we create a new part
800 758 for marker in markers:
801 759 nextdata = _fm0encodeonemarker(marker)
802 760 if (len(nextdata) + currentlen > _maxpayload):
803 761 currentpart = []
804 762 currentlen = 0
805 763 parts.append(currentpart)
806 764 currentpart.append(nextdata)
807 765 currentlen += len(nextdata)
808 766 for idx, part in enumerate(reversed(parts)):
809 767 data = ''.join([_pack('>B', _fm0version)] + part)
810 768 keys['dump%i' % idx] = util.b85encode(data)
811 769 return keys
812 770
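# An inverse sketch (hypothetical helper, not part of this change) of
# _pushkeyescape() above: every chunk round-trips through util.b85decode
# and _readmarkers, since each part carries its own version header.
def _pushkeyunescape(keys):
    allmarkers = []
    for name in sorted(keys):
        version, decoded = _readmarkers(util.b85decode(keys[name]))
        allmarkers.extend(decoded)
    return allmarkers
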
813 771 def listmarkers(repo):
814 772 """List markers over pushkey"""
815 773 if not repo.obsstore:
816 774 return {}
817 775 return _pushkeyescape(sorted(repo.obsstore))
818 776
819 777 def pushmarker(repo, key, old, new):
820 778 """Push markers over pushkey"""
821 779 if not key.startswith('dump'):
822 780 repo.ui.warn(_('unknown key: %r') % key)
823 781 return False
824 782 if old:
825 783 repo.ui.warn(_('unexpected old value for %r') % key)
826 784 return False
827 785 data = util.b85decode(new)
828 786 lock = repo.lock()
829 787 try:
830 788 tr = repo.transaction('pushkey: obsolete markers')
831 789 try:
832 790 repo.obsstore.mergemarkers(tr, data)
833 791 repo.invalidatevolatilesets()
834 792 tr.close()
835 793 return True
836 794 finally:
837 795 tr.release()
838 796 finally:
839 797 lock.release()
840 798
841 799 def getmarkers(repo, nodes=None, exclusive=False):
842 800 """returns markers known in a repository
843 801
844 802 If <nodes> is specified, only markers "relevant" to those nodes are
845 803 returned"""
846 804 if nodes is None:
847 805 rawmarkers = repo.obsstore
848 806 elif exclusive:
849 807 rawmarkers = obsutil.exclusivemarkers(repo, nodes)
850 808 else:
851 809 rawmarkers = repo.obsstore.relevantmarkers(nodes)
852 810
853 811 for markerdata in rawmarkers:
854 yield marker(repo, markerdata)
812 yield obsutil.marker(repo, markerdata)
855 813
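A hypothetical caller of getmarkers(), assuming `repo` is an already-open repository object (not runnable standalone):

from mercurial import node, obsolete

for m in obsolete.getmarkers(repo, nodes=[repo['.'].node()]):
    print('%s -> %s' % (node.short(m.precnode()),
                        [node.short(s) for s in m.succnodes()]))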
856 814 # keep compatibility for the 4.3 cycle
857 815 def allprecursors(obsstore, nodes, ignoreflags=0):
858 816 movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
859 817 util.nouideprecwarn(movemsg, '4.3')
860 818 return obsutil.allprecursors(obsstore, nodes, ignoreflags)
861 819
862 820 def allsuccessors(obsstore, nodes, ignoreflags=0):
863 821 movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
864 822 util.nouideprecwarn(movemsg, '4.3')
865 823 return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
866 824
825 def marker(repo, data):
826 movemsg = 'obsolete.marker moved to obsutil.marker'
827 repo.ui.deprecwarn(movemsg, '4.3')
828 return obsutil.marker(repo, data)
829
867 830 def exclusivemarkers(repo, nodes):
868 831 movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
869 832 repo.ui.deprecwarn(movemsg, '4.3')
870 833 return obsutil.exclusivemarkers(repo, nodes)
871 834
872 835 def foreground(repo, nodes):
873 836 movemsg = 'obsolete.foreground moved to obsutil.foreground'
874 837 repo.ui.deprecwarn(movemsg, '4.3')
875 838 return obsutil.foreground(repo, nodes)
876 839
877 840 def successorssets(repo, initialnode, cache=None):
878 841 movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
879 842 repo.ui.deprecwarn(movemsg, '4.3')
880 843 return obsutil.successorssets(repo, initialnode, cache=cache)
881 844
882 845 # mapping of 'set-name' -> <function to compute this set>
883 846 cachefuncs = {}
884 847 def cachefor(name):
885 848 """Decorator to register a function as computing the cache for a set"""
886 849 def decorator(func):
887 850 if name in cachefuncs:
888 851 msg = "duplicated registration for volatileset '%s' (existing: %r)"
889 852 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
890 853 cachefuncs[name] = func
891 854 return func
892 855 return decorator
893 856
894 857 def getrevs(repo, name):
895 858 """Return the set of revisions that belong to the <name> set
896 859
897 860 Such access may compute the set and cache it for future use"""
898 861 repo = repo.unfiltered()
899 862 if not repo.obsstore:
900 863 return frozenset()
901 864 if name not in repo.obsstore.caches:
902 865 repo.obsstore.caches[name] = cachefuncs[name](repo)
903 866 return repo.obsstore.caches[name]
904 867
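The registration pattern behind cachefor()/getrevs() is plain Python and can be sketched standalone (all names here are illustrative, not part of the module):

registry = {}

def cachedset(name):
    """register a function as the computer of the <name> set"""
    def decorator(func):
        assert name not in registry, 'duplicated registration'
        registry[name] = func
        return func
    return decorator

@cachedset('odd')
def _computeodd(data):
    return frozenset(x for x in data if x % 2)

caches = {}
def getset(data, name):
    if name not in caches:        # compute lazily, then memoize
        caches[name] = registry[name](data)
    return caches[name]

assert getset(range(5), 'odd') == frozenset([1, 3])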
905 868 # To be simple we need to invalidate obsolescence caches when:
906 869 #
907 870 # - a new changeset is added
908 871 # - the public phase is changed
909 872 # - obsolescence markers are added
910 873 # - strip is used on a repo
911 874 def clearobscaches(repo):
912 875 """Remove all obsolescence related caches from a repo
913 876
914 877 This removes all caches in the obsstore if the obsstore already exists
915 878 on the repo.
916 879
917 880 (We could be smarter here given the exact event that triggers the cache
918 881 clearing)"""
919 882 # only clear caches if there is obsstore data in this repo
920 883 if 'obsstore' in repo._filecache:
921 884 repo.obsstore.caches.clear()
922 885
923 886 def _mutablerevs(repo):
924 887 """the set of mutable revisions in the repository"""
925 888 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
926 889
927 890 @cachefor('obsolete')
928 891 def _computeobsoleteset(repo):
929 892 """the set of obsolete revisions"""
930 893 getnode = repo.changelog.node
931 894 notpublic = _mutablerevs(repo)
932 895 isobs = repo.obsstore.successors.__contains__
933 896 obs = set(r for r in notpublic if isobs(getnode(r)))
934 897 return obs
935 898
936 899 @cachefor('unstable')
937 900 def _computeunstableset(repo):
938 901 """the set of non-obsolete revisions with obsolete parents"""
939 902 pfunc = repo.changelog.parentrevs
940 903 mutable = _mutablerevs(repo)
941 904 obsolete = getrevs(repo, 'obsolete')
942 905 others = mutable - obsolete
943 906 unstable = set()
944 907 for r in sorted(others):
945 908 # A rev is unstable if one of its parents is obsolete or unstable
946 909 # this works since we traverse following growing rev order
947 910 for p in pfunc(r):
948 911 if p in obsolete or p in unstable:
949 912 unstable.add(r)
950 913 break
951 914 return unstable
952 915
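The single pass above is correct because revs are visited in increasing order, so a parent's status is always settled before its children are examined. A toy illustration with a made-up DAG:

parents = {1: [0], 2: [1], 3: [2], 4: [0]}   # toy DAG, rev -> parent revs
obsolete = set([1])
unstable = set()
for r in sorted(parents):                    # growing rev order
    if any(p in obsolete or p in unstable for p in parents[r]):
        unstable.add(r)
assert unstable == set([2, 3])               # 4 descends only from 0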
953 916 @cachefor('suspended')
954 917 def _computesuspendedset(repo):
955 918 """the set of obsolete parents with non-obsolete descendants"""
956 919 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
957 920 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
958 921
959 922 @cachefor('extinct')
960 923 def _computeextinctset(repo):
961 924 """the set of obsolete parents without non-obsolete descendants"""
962 925 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
963 926
964 927
965 928 @cachefor('bumped')
966 929 def _computebumpedset(repo):
967 930 """the set of revs trying to obsolete public revisions"""
968 931 bumped = set()
969 932 # util function (avoid attribute lookup in the loop)
970 933 phase = repo._phasecache.phase # would be faster to grab the full list
971 934 public = phases.public
972 935 cl = repo.changelog
973 936 torev = cl.nodemap.get
974 937 for ctx in repo.set('(not public()) and (not obsolete())'):
975 938 rev = ctx.rev()
976 939 # We only evaluate mutable, non-obsolete revisions
977 940 node = ctx.node()
978 941 # (future) A cache of precursors may be worth it if splits are very common
979 942 for pnode in obsutil.allprecursors(repo.obsstore, [node],
980 943 ignoreflags=bumpedfix):
981 944 prev = torev(pnode) # unfiltered! but so is phasecache
982 945 if (prev is not None) and (phase(repo, prev) <= public):
983 946 # we have a public precursor
984 947 bumped.add(rev)
985 948 break # Next draft!
986 949 return bumped
987 950
988 951 @cachefor('divergent')
989 952 def _computedivergentset(repo):
990 953 """the set of revs that compete to be the final successors of some revision.
991 954 """
992 955 divergent = set()
993 956 obsstore = repo.obsstore
994 957 newermap = {}
995 958 for ctx in repo.set('(not public()) - obsolete()'):
996 959 mark = obsstore.precursors.get(ctx.node(), ())
997 960 toprocess = set(mark)
998 961 seen = set()
999 962 while toprocess:
1000 963 prec = toprocess.pop()[0]
1001 964 if prec in seen:
1002 965 continue # emergency cycle hanging prevention
1003 966 seen.add(prec)
1004 967 if prec not in newermap:
1005 968 obsutil.successorssets(repo, prec, newermap)
1006 969 newer = [n for n in newermap[prec] if n]
1007 970 if len(newer) > 1:
1008 971 divergent.add(ctx.rev())
1009 972 break
1010 973 toprocess.update(obsstore.precursors.get(prec, ()))
1011 974 return divergent
1012 975
1013 976
1014 977 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
1015 978 operation=None):
1016 979 """Add obsolete markers between changesets in a repo
1017 980
1018 981 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1019 982 tuples. `old` and `news` are changectx objects. metadata is an optional
1020 983 dictionary containing metadata for this marker only. It is merged with the
1021 984 global metadata specified through the `metadata` argument of this function.
1022 985
1023 986 Trying to obsolete a public changeset will raise an exception.
1024 987
1025 988 Current user and date are used except if specified otherwise in the
1026 989 metadata attribute.
1027 990
1028 991 This function operates within a transaction of its own, but does
1029 992 not take any lock on the repo.
1030 993 """
1031 994 # prepare metadata
1032 995 if metadata is None:
1033 996 metadata = {}
1034 997 if 'user' not in metadata:
1035 998 metadata['user'] = repo.ui.username()
1036 999 useoperation = repo.ui.configbool('experimental',
1037 1000 'evolution.track-operation',
1038 1001 False)
1039 1002 if useoperation and operation:
1040 1003 metadata['operation'] = operation
1041 1004 tr = repo.transaction('add-obsolescence-marker')
1042 1005 try:
1043 1006 markerargs = []
1044 1007 for rel in relations:
1045 1008 prec = rel[0]
1046 1009 sucs = rel[1]
1047 1010 localmetadata = metadata.copy()
1048 1011 if 2 < len(rel):
1049 1012 localmetadata.update(rel[2])
1050 1013
1051 1014 if not prec.mutable():
1052 1015 raise error.Abort(_("cannot obsolete public changeset: %s")
1053 1016 % prec,
1054 1017 hint="see 'hg help phases' for details")
1055 1018 nprec = prec.node()
1056 1019 nsucs = tuple(s.node() for s in sucs)
1057 1020 npare = None
1058 1021 if not nsucs:
1059 1022 npare = tuple(p.node() for p in prec.parents())
1060 1023 if nprec in nsucs:
1061 1024 raise error.Abort(_("changeset %s cannot obsolete itself")
1062 1025 % prec)
1063 1026
1064 1027 # Creating the marker causes the hidden cache to become invalid,
1065 1028 # which causes recomputation when we ask for prec.parents() above,
1066 1029 # resulting in n^2 behavior. So let's prepare all of the args
1067 1030 # first, then create the markers.
1068 1031 markerargs.append((nprec, nsucs, npare, localmetadata))
1069 1032
1070 1033 for args in markerargs:
1071 1034 nprec, nsucs, npare, localmetadata = args
1072 1035 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1073 1036 date=date, metadata=localmetadata,
1074 1037 ui=repo.ui)
1075 1038 repo.filteredrevcache.clear()
1076 1039 tr.close()
1077 1040 finally:
1078 1041 tr.release()
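A hypothetical call to createmarkers(), assuming `repo` is an open repository and `newnode` identifies the rewritten changeset (not runnable standalone):

from mercurial import obsolete

old = repo['.']                 # the changeset being rewritten
new = repo[newnode]             # its replacement
obsolete.createmarkers(repo, [(old, (new,))], operation='amend')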
@@ -1,435 +1,476 b''
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 class marker(object):
11 """Wrap obsolete marker raw data"""
12
13 def __init__(self, repo, data):
14 # the repo argument will be used to create changectx in later version
15 self._repo = repo
16 self._data = data
17 self._decodedmeta = None
18
19 def __hash__(self):
20 return hash(self._data)
21
22 def __eq__(self, other):
23 if type(other) != type(self):
24 return False
25 return self._data == other._data
26
27 def precnode(self):
28 """Precursor changeset node identifier"""
29 return self._data[0]
30
31 def succnodes(self):
32 """List of successor changesets node identifiers"""
33 return self._data[1]
34
35 def parentnodes(self):
36 """Parents of the precursors (None if not recorded)"""
37 return self._data[5]
38
39 def metadata(self):
40 """Decoded metadata dictionary"""
41 return dict(self._data[3])
42
43 def date(self):
44 """Creation date as (unixtime, offset)"""
45 return self._data[4]
46
47 def flags(self):
48 """The flags field of the marker"""
49 return self._data[2]
50
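A minimal sketch of wrapping a raw marker tuple with the class above. The field layout (precursor, successors, flags, metadata, date, parents) matches the accessors, and since the repo argument is only stored, None is enough for this illustration:

raw = (b'\x01' * 20,            # precursor node
       (b'\x02' * 20,),         # successor nodes
       0,                       # flags
       (('user', 'alice'),),    # metadata as key/value pairs
       (0.0, 0),                # date: (unixtime, offset)
       None)                    # parents of the precursor (unrecorded)
m = marker(None, raw)
assert m.precnode() == b'\x01' * 20
assert m.metadata() == {'user': 'alice'}
assert not m.flags()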
10 51 def closestpredecessors(repo, nodeid):
11 52 """yield the list of next predecessors pointing on visible changectx nodes
12 53
13 54 This function respects the repoview filtering; filtered revisions will be
14 55 considered missing.
15 56 """
16 57
17 58 precursors = repo.obsstore.precursors
18 59 stack = [nodeid]
19 60 seen = set(stack)
20 61
21 62 while stack:
22 63 current = stack.pop()
23 64 currentpreccs = precursors.get(current, ())
24 65
25 66 for prec in currentpreccs:
26 67 precnodeid = prec[0]
27 68
28 69 # Basic cycle protection
29 70 if precnodeid in seen:
30 71 continue
31 72 seen.add(precnodeid)
32 73
33 74 if precnodeid in repo:
34 75 yield precnodeid
35 76 else:
36 77 stack.append(precnodeid)
37 78
38 79 def allprecursors(obsstore, nodes, ignoreflags=0):
39 80 """Yield a node for every precursor of <nodes>.
40 81
41 82 Some precursors may be unknown locally.
42 83
43 84 This is a linear yield unsuited to detecting folded changesets. It includes
44 85 initial nodes too."""
45 86
46 87 remaining = set(nodes)
47 88 seen = set(remaining)
48 89 while remaining:
49 90 current = remaining.pop()
50 91 yield current
51 92 for mark in obsstore.precursors.get(current, ()):
52 93 # ignore marker flagged with specified flag
53 94 if mark[2] & ignoreflags:
54 95 continue
55 96 suc = mark[0]
56 97 if suc not in seen:
57 98 seen.add(suc)
58 99 remaining.add(suc)
59 100
60 101 def allsuccessors(obsstore, nodes, ignoreflags=0):
61 102 """Yield a node for every successor of <nodes>.
62 103
63 104 Some successors may be unknown locally.
64 105
65 106 This is a linear yield unsuited to detecting split changesets. It includes
66 107 initial nodes too."""
67 108 remaining = set(nodes)
68 109 seen = set(remaining)
69 110 while remaining:
70 111 current = remaining.pop()
71 112 yield current
72 113 for mark in obsstore.successors.get(current, ()):
73 114 # ignore marker flagged with specified flag
74 115 if mark[2] & ignoreflags:
75 116 continue
76 117 for suc in mark[1]:
77 118 if suc not in seen:
78 119 seen.add(suc)
79 120 remaining.add(suc)
80 121
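A toy illustration of the walk above, run alongside the function; FakeStore and its marker tuples are made up, carrying only the fields the walk reads:

class FakeStore(object):
    # node -> markers; each marker is (precursor, successors, flags)
    successors = {
        'A': [('A', ('B', 'C'), 0)],   # A split into B and C
        'B': [('B', ('D',), 0)],       # B later rewritten as D
    }

assert set(allsuccessors(FakeStore(), ['A'])) == set('ABCD')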
81 122 def _filterprunes(markers):
82 123 """return a set with no prune markers"""
83 124 return set(m for m in markers if m[1])
84 125
85 126 def exclusivemarkers(repo, nodes):
86 127 """set of markers relevant to "nodes" but no other locally-known nodes
87 128
88 129 This function computes the set of markers "exclusive" to a locally-known
89 130 node. This means we walk the markers starting from <nodes> until we reach
90 131 a locally-known precursor outside of <nodes>. Elements of <nodes> with
91 132 locally-known successors outside of <nodes> are ignored (since their
92 133 precursor markers are also relevant to these successors).
93 134
94 135 For example:
95 136
96 137 # (A0 rewritten as A1)
97 138 #
98 139 # A0 <-1- A1 # Marker "1" is exclusive to A1
99 140
100 141 or
101 142
102 143 # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
103 144 #
104 145 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
105 146
106 147 or
107 148
108 149 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
109 150 #
110 151 # <-2- A1 # Marker "2" is exclusive to A0,A1
111 152 # /
112 153 # <-1- A0
113 154 # \
114 155 # <-3- A2 # Marker "3" is exclusive to A0,A2
115 156 #
116 157 # in addition:
117 158 #
118 159 # Markers "2,3" are exclusive to A1,A2
119 160 # Markers "1,2,3" are exclusive to A0,A1,A2
120 161
121 162 See test/test-obsolete-bundle-strip.t for more examples.
122 163
123 164 An example usage is strip. When stripping a changeset, we also want to
124 165 strip the markers exclusive to this changeset. Otherwise we would have
125 166 "dangling" obsolescence markers from its precursors: Obsolescence markers
126 167 marking a node as obsolete without any successors available locally.
127 168
128 169 As for relevant markers, the prune markers for children will be followed.
129 170 Of course, they will only be followed if the pruned child is
130 171 locally-known, since the prune markers are relevant to the pruned node.
131 172 However, while prune markers are considered relevant to the parent of the
132 173 pruned changesets, prune markers for locally-known changesets (with no
133 174 successors) are considered exclusive to the pruned nodes. This allows
134 175 stripping the prune markers (with the rest of the exclusive chain)
135 176 alongside the pruned changesets.
136 177 """
137 178 # running on a filtered repository would be dangerous as markers could be
138 179 # reported as exclusive when they are relevant for other filtered nodes.
139 180 unfi = repo.unfiltered()
140 181
141 182 # shortcuts to various useful items
142 183 nm = unfi.changelog.nodemap
143 184 precursorsmarkers = unfi.obsstore.precursors
144 185 successormarkers = unfi.obsstore.successors
145 186 childrenmarkers = unfi.obsstore.children
146 187
147 188 # exclusive markers (return of the function)
148 189 exclmarkers = set()
149 190 # we need fast membership testing
150 191 nodes = set(nodes)
151 192 # looking for head in the obshistory
152 193 #
153 194 # XXX we are ignoring all issues in regard to cycles for now.
154 195 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
155 196 stack.sort()
156 197 # nodes already stacked
157 198 seennodes = set(stack)
158 199 while stack:
159 200 current = stack.pop()
160 201 # fetch precursor markers
161 202 markers = list(precursorsmarkers.get(current, ()))
162 203 # extend the list with prune markers
163 204 for mark in successormarkers.get(current, ()):
164 205 if not mark[1]:
165 206 markers.append(mark)
166 207 # and markers from children (looking for prune)
167 208 for mark in childrenmarkers.get(current, ()):
168 209 if not mark[1]:
169 210 markers.append(mark)
170 211 # traverse the markers
171 212 for mark in markers:
172 213 if mark in exclmarkers:
173 214 # markers already selected
174 215 continue
175 216
176 217 # If the marker is about the current node, select it
177 218 #
178 219 # (this delays the addition of markers from children)
179 220 if mark[1] or mark[0] == current:
180 221 exclmarkers.add(mark)
181 222
182 223 # should we keep traversing through the precursors?
183 224 prec = mark[0]
184 225
185 226 # nodes in the stack or already processed
186 227 if prec in seennodes:
187 228 continue
188 229
189 230 # is this a locally-known node?
190 231 known = prec in nm
191 232 # if locally-known and not in the <nodes> set, the traversal
192 233 # stops here.
193 234 if known and prec not in nodes:
194 235 continue
195 236
196 237 # do not keep going if there are unselected markers pointing to this
197 238 # node. If we end up traversing these unselected markers later the
198 239 # node will be taken care of at that point.
199 240 precmarkers = _filterprunes(successormarkers.get(prec))
200 241 if precmarkers.issubset(exclmarkers):
201 242 seennodes.add(prec)
202 243 stack.append(prec)
203 244
204 245 return exclmarkers
205 246
206 247 def foreground(repo, nodes):
207 248 """return all nodes in the "foreground" of other nodes
208 249
209 250 The foreground of a revision is anything reachable using parent -> children
210 251 or precursor -> successor relations. It is very similar to "descendant" but
211 252 augmented with obsolescence information.
212 253
213 254 Beware that obsolescence cycles may result in complex situations.
214 255 """
215 256 repo = repo.unfiltered()
216 257 foreground = set(repo.set('%ln::', nodes))
217 258 if repo.obsstore:
218 259 # We only need this complicated logic if there is obsolescence
219 260 # XXX will probably deserve an optimised revset.
220 261 nm = repo.changelog.nodemap
221 262 plen = -1
222 263 # compute the whole set of successors or descendants
223 264 while len(foreground) != plen:
224 265 plen = len(foreground)
225 266 succs = set(c.node() for c in foreground)
226 267 mutable = [c.node() for c in foreground if c.mutable()]
227 268 succs.update(allsuccessors(repo.obsstore, mutable))
228 269 known = (n for n in succs if n in nm)
229 270 foreground = set(repo.set('%ln::', known))
230 271 return set(c.node() for c in foreground)
231 272
232 273 def successorssets(repo, initialnode, cache=None):
233 274 """Return set of all latest successors of initial nodes
234 275
235 276 The successors set of a changeset A are the group of revisions that succeed
236 277 A. It succeeds A as a consistent whole, each revision being only a partial
237 278 replacement. The successors set contains non-obsolete changesets only.
238 279
239 280 This function returns the full list of successor sets which is why it
240 281 returns a list of tuples and not just a single tuple. Each tuple is a valid
241 282 successors set. Note that (A,) may be a valid successors set for changeset A
242 283 (see below).
243 284
244 285 In most cases, a changeset A will have a single element (e.g. the changeset
245 286 A is replaced by A') in its successors set. Though, it is also common for a
246 287 changeset A to have no elements in its successor set (e.g. the changeset
247 288 has been pruned). Therefore, the returned list of successors sets will be
248 289 [(A',)] or [], respectively.
249 290
250 291 When a changeset A is split into A' and B', however, it will result in a
251 292 successors set containing more than a single element, i.e. [(A',B')].
252 293 Divergent changesets will result in multiple successors sets, i.e. [(A',),
253 294 (A'')].
254 295
255 296 If a changeset A is not obsolete, then it will conceptually have no
256 297 successors set. To distinguish this from a pruned changeset, the successor
257 298 set will contain itself only, i.e. [(A,)].
258 299
259 300 Finally, successors unknown locally are considered to be pruned (obsoleted
260 301 without any successors).
261 302
262 303 The optional `cache` parameter is a dictionary that may contain precomputed
263 304 successors sets. It is meant to reuse the computation of a previous call to
264 305 `successorssets` when multiple calls are made at the same time. The cache
265 306 dictionary is updated in place. The caller is responsible for its life
266 307 span. Code that makes multiple calls to `successorssets` *must* use this
267 308 cache mechanism or suffer terrible performance.
268 309 """
269 310
270 311 succmarkers = repo.obsstore.successors
271 312
272 313 # Stack of nodes we search successors sets for
273 314 toproceed = [initialnode]
274 315 # set version of above list for fast loop detection
275 316 # element added to "toproceed" must be added here
276 317 stackedset = set(toproceed)
277 318 if cache is None:
278 319 cache = {}
279 320
280 321 # This while loop is the flattened version of a recursive search for
281 322 # successors sets
282 323 #
283 324 # def successorssets(x):
284 325 # successors = directsuccessors(x)
285 326 # ss = [[]]
286 327 # for succ in directsuccessors(x):
287 328 # # product as in itertools cartesian product
288 329 # ss = product(ss, successorssets(succ))
289 330 # return ss
290 331 #
291 332 # But we cannot use plain recursive calls here:
292 333 # - that would blow the python call stack
293 334 # - obsolescence markers may have cycles, we need to handle them.
294 335 #
295 336 # The `toproceed` list acts as our call stack. Every node we search
296 337 # successors sets for is stacked there.
297 338 #
298 339 # The `stackedset` is the set version of this stack, used to check if a
299 340 # node is already stacked. This check is used to detect cycles and prevent
300 341 # infinite loops.
301 342 #
302 343 # successors sets of all nodes are stored in the `cache` dictionary.
303 344 #
304 345 # After this while loop ends we use the cache to return the successors sets
305 346 # for the node requested by the caller.
306 347 while toproceed:
307 348 # Every iteration tries to compute the successors sets of the topmost
308 349 # node of the stack: CURRENT.
309 350 #
310 351 # There are four possible outcomes:
311 352 #
312 353 # 1) We already know the successors sets of CURRENT:
313 354 # -> mission accomplished, pop it from the stack.
314 355 # 2) Node is not obsolete:
315 356 # -> the node is its own successors sets. Add it to the cache.
316 357 # 3) We do not know successors set of direct successors of CURRENT:
317 358 # -> We add those successors to the stack.
318 359 # 4) We know successors sets of all direct successors of CURRENT:
319 360 # -> We can compute CURRENT successors set and add it to the
320 361 # cache.
321 362 #
322 363 current = toproceed[-1]
323 364 if current in cache:
324 365 # case (1): We already know the successors sets
325 366 stackedset.remove(toproceed.pop())
326 367 elif current not in succmarkers:
327 368 # case (2): The node is not obsolete.
328 369 if current in repo:
329 370 # We have a valid last successor.
330 371 cache[current] = [(current,)]
331 372 else:
332 373 # Final obsolete version is unknown locally.
333 374 # Do not count that as a valid successor.
334 375 cache[current] = []
335 376 else:
336 377 # cases (3) and (4)
337 378 #
338 379 # We proceed in two phases. Phase 1 aims to distinguish case (3)
339 380 # from case (4):
340 381 #
341 382 # For each direct successors of CURRENT, we check whether its
342 383 # successors sets are known. If they are not, we stack the
343 384 # unknown node and proceed to the next iteration of the while
344 385 # loop. (case 3)
345 386 #
346 387 # During this step, we may detect obsolescence cycles: a node
347 388 # with unknown successors sets but already in the call stack.
348 389 # In such a situation, we arbitrarily set the successors sets of
349 390 # the node to nothing (node pruned) to break the cycle.
350 391 #
351 392 # If no break was encountered we proceed to phase 2.
352 393 #
353 394 # Phase 2 computes successors sets of CURRENT (case 4); see details
354 395 # in phase 2 itself.
355 396 #
356 397 # Note the two levels of iteration in each phase.
357 398 # - The first one handles obsolescence markers using CURRENT as
358 399 # precursor (successors markers of CURRENT).
359 400 #
360 401 # Having multiple entry here means divergence.
361 402 #
362 403 # - The second one handles successors defined in each marker.
363 404 #
364 405 # Having none means pruned node, multiple successors means split,
365 406 # single successors are standard replacement.
366 407 #
367 408 for mark in sorted(succmarkers[current]):
368 409 for suc in mark[1]:
369 410 if suc not in cache:
370 411 if suc in stackedset:
371 412 # cycle breaking
372 413 cache[suc] = []
373 414 else:
374 415 # case (3) If we have not computed successors sets
375 416 # of one of those successors we add it to the
376 417 # `toproceed` stack and stop all work for this
377 418 # iteration.
378 419 toproceed.append(suc)
379 420 stackedset.add(suc)
380 421 break
381 422 else:
382 423 continue
383 424 break
384 425 else:
385 426 # case (4): we know all successors sets of all direct
386 427 # successors
387 428 #
388 429 # Successors set contributed by each marker depends on the
389 430 # successors sets of all its "successors" node.
390 431 #
391 432 # Each different marker is a divergence in the obsolescence
392 433 # history. It contributes successors sets distinct from other
393 434 # markers.
394 435 #
395 436 # Within a marker, a successor may have divergent successors
396 437 # sets. In such a case, the marker will contribute multiple
397 438 # divergent successors sets. If multiple successors have
398 439 # divergent successors sets, a Cartesian product is used.
399 440 #
400 441 # At the end we post-process successors sets to remove
401 442 # duplicated entries and successors sets that are a strict subset of
402 443 # another one.
403 444 succssets = []
404 445 for mark in sorted(succmarkers[current]):
405 446 # successors sets contributed by this marker
406 447 markss = [[]]
407 448 for suc in mark[1]:
408 449 # cartesian product with previous successors
409 450 productresult = []
410 451 for prefix in markss:
411 452 for suffix in cache[suc]:
412 453 newss = list(prefix)
413 454 for part in suffix:
414 455 # do not duplicate entries in a successors set;
415 456 # the first entry wins.
416 457 if part not in newss:
417 458 newss.append(part)
418 459 productresult.append(newss)
419 460 markss = productresult
420 461 succssets.extend(markss)
421 462 # remove duplicated and subset
422 463 seen = []
423 464 final = []
424 465 candidate = sorted(((set(s), s) for s in succssets if s),
425 466 key=lambda x: len(x[1]), reverse=True)
426 467 for setversion, listversion in candidate:
427 468 for seenset in seen:
428 469 if setversion.issubset(seenset):
429 470 break
430 471 else:
431 472 final.append(listversion)
432 473 seen.append(setversion)
433 474 final.reverse() # put small successors set first
434 475 cache[current] = final
435 476 return cache[initialnode]
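Per the note on the `cache` parameter, a hypothetical multi-node caller shares one dictionary across calls so later computations reuse earlier ones (assumes `repo` and a list of binary `nodes`; not runnable standalone):

from mercurial import node, obsutil

cache = {}                     # shared: later calls reuse earlier results
for n in nodes:
    for sset in obsutil.successorssets(repo, n, cache=cache):
        print([node.short(s) for s in sset])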