##// END OF EJS Templates
devel: use default-date config field when creating obsmarkers...
Boris Feld -
r32411:08d02c1d default
parent child Browse files
Show More
@@ -1,2161 +1,2161 b''
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import difflib
11 11 import errno
12 12 import operator
13 13 import os
14 14 import random
15 15 import socket
16 16 import string
17 17 import sys
18 18 import tempfile
19 19 import time
20 20
21 21 from .i18n import _
22 22 from .node import (
23 23 bin,
24 24 hex,
25 25 nullhex,
26 26 nullid,
27 27 nullrev,
28 28 short,
29 29 )
30 30 from . import (
31 31 bundle2,
32 32 changegroup,
33 33 cmdutil,
34 34 color,
35 35 context,
36 36 dagparser,
37 37 dagutil,
38 38 encoding,
39 39 error,
40 40 exchange,
41 41 extensions,
42 42 filemerge,
43 43 fileset,
44 44 formatter,
45 45 hg,
46 46 localrepo,
47 47 lock as lockmod,
48 48 merge as mergemod,
49 49 obsolete,
50 50 policy,
51 51 pvec,
52 52 pycompat,
53 53 registrar,
54 54 repair,
55 55 revlog,
56 56 revset,
57 57 revsetlang,
58 58 scmutil,
59 59 setdiscovery,
60 60 simplemerge,
61 61 smartset,
62 62 sslutil,
63 63 streamclone,
64 64 templater,
65 65 treediscovery,
66 66 upgrade,
67 67 util,
68 68 vfs as vfsmod,
69 69 )
70 70
# convenience alias: release a sequence of locks/transactions in order,
# ignoring None entries (used in the finally blocks below)
release = lockmod.release

# decorator that registers each debug* function below into this module's
# command table
command = registrar.command()
74 74
@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    # Two calling conventions: with three args the first is a revlog index
    # file opened directly from the CWD (no repository needed); with two
    # args the current repository's changelog is used.
    if len(args) == 3:
        index, rev1, rev2 = args
        # audit=False: the index path is user-supplied and may point
        # anywhere on disk, not just inside a repo
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (r.rev(a), hex(a)))
93 93
@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    # open the bundle (local path or URL), sniff its type, and apply it
    # to the current repository
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.apply(repo)
100 100
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    # refuse to run on a non-empty repo: revision numbers in the DAG text
    # are assumed to start at 0
    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # first pass over the DAG text: count 'n' (node) events so the
    # progress bar below has a total
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []

    wlock = lock = tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction("builddag")

        # second pass: actually commit one changeset per 'n' event
        at = -1
        atbranch = 'default'
        nodeids = []
        id = 0
        ui.progress(_('building'), id, unit=_('revisions'), total=total)
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % str(data)))
                id, ps = data

                files = []
                fctxs = {}

                p2 = None
                if mergeable_file:
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # merge: three-way merge the "mf" file contents of
                        # both parents against their common ancestor
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    # tag this revision's own line so every rev changes "mf"
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, mergedtext)

                if overwritten_file:
                    # single file rewritten wholesale by every revision
                    fn = "of"
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)

                if new_file:
                    # brand-new file per revision; on merges also carry over
                    # the other parent's nf* files
                    fn = "nf%i" % id
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                fctxs[fn] = p2[fn]

                def fctxfn(repo, cx, path):
                    return fctxs.get(path)

                # map DAG-local parent ids to the real nodeids committed
                # earlier in this run (ps[0] < 0 means null parent)
                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                # local tag event: record for .hg/localtags, written below
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                # branch annotation: applies to all subsequent nodes
                ui.note(('branch %s\n' % data))
                atbranch = data
            ui.progress(_('building'), id, unit=_('revisions'), total=total)
        tr.close()

        if tags:
            repo.vfs.write("localtags", "".join(tags))
    finally:
        ui.progress(_('building'), None)
        release(tr, lock, wlock)
252 252
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    # Shared helper for debugbundle/debugbundle2: dump the changegroup
    # unbundler 'gen'. With all=True every delta chunk of every revlog is
    # printed; otherwise only changelog node ids. 'indent' prefixes each
    # line (used when nested inside bundle2 part output).
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            # print every delta chunk of one revlog group; deltachunk()
            # returns {} at end-of-group, which terminates iter()
            ui.write("\n%s%s\n" % (indent_string, named))
            chain = None
            for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']
                ui.write("%s%s %s %s %s %s %s\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))
                chain = node

        # a changegroup stream is: changelog group, manifest group, then
        # one group per file (each preceded by its filename header)
        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        # terse mode: only the changelog node ids
        chunkdata = gen.changelogheader()
        chain = None
        for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
            node = chunkdata['node']
            ui.write("%s%s\n" % (indent_string, hex(node)))
            chain = node
290 290
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % repr(gen.params)))
    # one line per part; changegroup parts are additionally expanded via
    # the shared changegroup dumper, indented under the part header
    for part in gen.iterparts():
        ui.write('%s -- %r\n' % (part.type, repr(part.params)))
        if part.type == 'changegroup':
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            _debugchangegroup(ui, cg, all=all, indent=4, **opts)
302 302
@command('debugbundle',
        [('a', 'all', None, _('show all details')),
         ('', 'spec', None, _('print the bundlespec of the bundle'))],
        _('FILE'),
        norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            # --spec: only print the computed bundlespec string and stop
            spec = exchange.getbundlespec(ui, f)
            ui.write('%s\n' % spec)
            return

        # dispatch on bundle format: bundle2 vs legacy changegroup
        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
320 320
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate

    Cross-checks every dirstate entry against the manifests of the two
    working-directory parents, warning about each inconsistency found,
    and aborts if any were detected.
    """
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        # 'n'ormal/'r'emoved entries must exist in the first parent
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        # 'a'dded entries must NOT already exist in the first parent
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        # 'm'erged entries must come from at least one parent
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    # reverse direction: every file in the first parent's manifest must be
    # tracked by the dirstate
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # Use a distinct local name: the original code assigned the message
        # to 'error', shadowing the imported 'error' module and making the
        # subsequent error.Abort lookup fail with AttributeError.
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
348 348
@command('debugcolor',
        [('', 'style', None, _('show all configured styles'))],
        'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.write(('color mode: %s\n') % ui._colormode)
    # --style lists style labels; the default lists raw color names
    if opts.get('style'):
        return _debugdisplaystyle(ui)
    else:
        return _debugdisplaycolor(ui)
359 359
def _debugdisplaycolor(ui):
    # Work on a copy so the caller's ui style table is not clobbered;
    # rebuild _styles so each color name is labeled with itself and thus
    # rendered in its own color.
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        # in terminfo mode also expose user-defined color.*/terminfo.*
        # config entries, keyed without their section prefix
        for k, v in ui.configitems('color'):
            if k.startswith('color.'):
                ui._styles[k] = k[6:]
            elif k.startswith('terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_('available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    items = sorted(ui._styles.items(),
                   key=lambda i: ('_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(('%s\n') % colorname, label=label)
377 377
def _debugdisplaystyle(ui):
    # List every configured style label, rendered in its own style, with
    # its effects aligned in a column after the longest label.
    ui.write(_('available style:\n'))
    longest = max(len(name) for name in ui._styles)
    for name, effects in sorted(ui._styles.items()):
        ui.write('%s' % name, label=name)
        if effects:
            # pad so the effect lists line up, then render each effect
            # name in its own effect
            ui.write(': ')
            ui.write(' ' * (max(0, longest - len(name))))
            rendered = [ui.label(e, e) for e in effects.split()]
            ui.write(', '.join(rendered))
        ui.write('\n')
389 389
@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # generate the v1 stream bundle and the repo requirements a consumer
    # must support to apply it
    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
401 401
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
    ('b', 'branches', None, _('annotate with branch names')),
    ('', 'dots', None, _('use dots for runs')),
    ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # read the DAG from a revlog index file on disk; listed revs get
        # 'l' (label) events so they show up as rN in the output
        rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # walk the changelog, optionally emitting branch annotations ('a')
        # when the branch changes and tag labels ('l') per revision
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
    ui.write("\n")
464 464
@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    # with -c/-m/--dir the positional FILE slot actually holds the REV
    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
    try:
        # raw=True: emit stored data without applying revlog flag transforms
        ui.write(r.revision(r.lookup(rev), raw=True))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)
479 479
@command('debugdate',
        [('e', 'extended', None, _('try extended date formats'))],
        _('[-e] DATE [RANGE]'),
        norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        d = util.parsedate(date, util.extendeddateformats)
    else:
        d = util.parsedate(date)
    # d is an (unixtime, tzoffset) pair
    ui.write(("internal: %s %s\n") % d)
    ui.write(("standard: %s\n") % util.datestr(d))
    if range:
        # optionally test the parsed date against a date range expression
        m = util.matchdate(range)
        ui.write(("match: %s\n") % m(d[0]))
495 495
@command('debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``:       revision number
    :``chainid``:   delta chain identifier (numbered by unique base)
    :``chainlen``:  delta chain length to this revision
    :``prevrev``:   previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``:  compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                    (new delta chains typically start at ratio 2.00)
    :``lindist``:   linear distance from base revision in delta chain to end
                    of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                    how much unrelated data is needed to load this delta chain
    """
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    generaldelta = r.version & revlog.FLAG_GENERALDELTA

    def revinfo(rev):
        # index entry fields used here: e[1] compressed size,
        # e[2] uncompressed size, e[3] delta base rev, e[5]/e[6] parent revs
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # classify the delta base relative to this revision
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            # without generaldelta the base is either self or the previous rev
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    fm.plain('    rev  chain# chainlen     prev   delta       '
             'size    rawsize  chainsize     ratio   lindist extradist '
             'extraratio\n')

    # assign stable chain ids in order of first appearance of each base
    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = r.start(chainbase)
        revstart = r.start(rev)
        # bytes spanned on disk from chain base to end of this rev, and how
        # much of that span is data foreign to this chain
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        chainratio = float(chainsize) / float(uncomp)
        extraratio = float(extradist) / float(chainsize)

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)

    fm.end()
596 596
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime')),
    ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = opts.get('nodates')
    datesort = opts.get('datesort')

    timestr = ""
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    # dirstate entry tuple: (state char, mode, size, mtime)
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            timestr = 'unset               '
        elif nodates:
            timestr = 'set                 '
        else:
            timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
        # 0o20000 flags a symlink in the stored mode bits
        if ent[1] & 0o20000:
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    # trailing section: recorded copy sources
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
627 627
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
    ('', 'nonheads', None,
     _('use old-style discovery with non-heads included')),
    ] + cmdutil.remoteopts,
    _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
                                      opts.get('branch'))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(localheads, remoteheads, remote=remote):
        # run one discovery round and report the computed common heads
        if opts.get('old'):
            if localheads:
                raise error.Abort('cannot use localheads with old style '
                                  'discovery')
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                # reduce the raw common set to its connected head set
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))
                dag = dagutil.revlogdag(repo.changelog)
                all = dag.ancestorset(dag.internalizeall(common))
                common = dag.externalizeall(dag.headsetofconnecteds(all))
        else:
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))

    # with --serverlog, replay recorded head sets from server log files;
    # otherwise run a single live discovery round
    serverlogs = opts.get('serverlog')
    if serverlogs:
        for filename in serverlogs:
            with open(filename, 'r') as logfile:
                line = logfile.readline()
                while line:
                    parts = line.strip().split(';')
                    op = parts[1]
                    if op == 'cg':
                        pass
                    elif op == 'cgss':
                        doit(parts[2].split(' '), parts[3].split(' '))
                    elif op == 'unb':
                        doit(parts[3].split(' '), parts[2].split(' '))
                    line = logfile.readline()
    else:
        remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
                                                 opts.get('remote_head'))
        localrevs = opts.get('local_head')
        doit(localrevs, remoterevs)
693 693
@command('debugextensions', cmdutil.formatteropts, [], norepo=True)
def debugextensions(ui, **opts):
    '''show information about active extensions'''
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            # default verbosity: flag extensions whose tested-with list
            # does not cover the running Mercurial version
            fm.write('name', '%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        fm.condwrite(ui.verbose and extsource, 'source',
                 _('  location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_('  bundled: %s\n') % ['no', 'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                 _('  tested with: %s\n'),
                 fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                 _('  bug reporting: %s\n'), extbuglink or "")

    fm.end()
738 738
@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
    _('[-r REV] FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)
    if ui.verbose:
        # in verbose mode also show the parsed fileset tree
        tree = fileset.parse(expr)
        ui.note(fileset.prettyformat(tree), "\n")

    for f in ctx.getfileset(expr):
        ui.write("%s\n" % f)
751 751
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
    ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
    # probe case sensitivity with a throwaway file; best-effort only
    # (e.g. unwritable path), hence the swallowed OSError
    casesensitive = '(unknown)'
    try:
        with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)
766 766
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
    ('C', 'common', [], _('id of common node'), _('ID')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    args = {}
    # convert hex node ids from the command line to binary for the wire call
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args['bundlecaps'] = None
    bundle = repo.getbundle('debug', **args)

    # map the user-facing compression name to the on-disk bundle type
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
800 800
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % repr(ignore))
    else:
        for f in files:
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != '.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    # the file itself is not matched; check whether one of
                    # its parent directories is ignored instead
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % f)
                else:
                    ui.write(_("%s is ignored because of "
                               "containing folder %s\n")
                             % (f, ignored))
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % f)
841 841
@command('debugindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugindex(ui, repo, file_=None, **opts):
    """dump the contents of an index file"""
    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    # column header depends on whether the base column holds a delta
    # parent (generaldelta) or a chain base
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    if generaldelta:
        basehdr = ' delta'
    else:
        basehdr = '  base'

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        ui.write(("   rev    offset  length " + basehdr + " linkrev"
                 " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        ui.write(("   rev flag   offset   length"
                 "     size " + basehdr + "   link     p1     p2"
                 " %s\n") % "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if generaldelta:
            base = r.deltaparent(i)
        else:
            base = r.chainbase(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # be forgiving with damaged indexes; show null parents
                pp = [nullid, nullid]
            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), base, r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    base, r.linkrev(i), pr[0], pr[1], shortfn(node)))
897 897
@command('debugindexdot', cmdutil.debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    rlog = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for rev in rlog:
        parents = rlog.parents(rlog.node(rev))
        # p1 always produces an edge; p2 only when it is a real parent
        ui.write("\t%d -> %d\n" % (rlog.rev(parents[0]), rev))
        if parents[1] != nullid:
            ui.write("\t%d -> %d\n" % (rlog.rev(parents[1]), rev))
    ui.write("}\n")
911 911
@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''

    def writetemp(contents):
        # write contents to a private temp file and return its path;
        # the caller is responsible for removing the file
        # NOTE(review): appears unused in this function body — confirm
        # before removing
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, pycompat.sysstr("wb"))
        f.write(contents)
        f.close()
        return name

    # running count of hard failures; warnings do not increment it
    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        encoding.fromlocal("test")
    except error.Abort as inst:
        err = inst
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable)
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    # TLS protocol / SNI support compiled into this Python build
    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_(' TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_(' SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    if policy.policy in ('c', 'allow'):
        # C extensions are expected: make sure they actually import
        err = None
        try:
            from .cext import (
                base85,
                bdiff,
                mpatch,
                osutil,
            )
            dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
        except Exception as inst:
            err = inst
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    # compression engines (registered, available, and wire-protocol capable)
    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = inst
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    fm.write('editor', _("checking commit editor... (%s)\n"), editor)
    cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
    # distinguish "no editor configured at all" (still the 'vi' default)
    # from "configured editor not found on PATH"
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editor)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    # non-zero exit status when anything failed
    return problems
1078 1078
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('known'):
        raise error.Abort("known() not supported by target repository")
    # one query for all ids; answers come back in the same order
    known = peer.known([bin(nodeid) for nodeid in ids])
    ui.write("%s\n" % "".join("1" if flag else "0" for flag in known))
1091 1091
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # thin alias: all completion logic lives in debugnamecomplete
    debugnamecomplete(ui, repo, *args)
1096 1096
@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Returns 0 if no locks are held.

    """

    if opts.get('force_lock'):
        repo.svfs.unlink('lock')
    if opts.get('force_wlock'):
        repo.vfs.unlink('wlock')
    # fixed: the second operand used to re-test 'force_lock', so invoking
    # the command with only --force-wlock fell through into the reporting
    # code below instead of returning after freeing the lock
    if opts.get('force_lock') or opts.get('force_wlock'):
        return 0

    now = time.time()
    held = 0

    def report(vfs, name, method):
        """Print the state of one lock file and return 1 if it is held.

        'method' is the repo's lock-acquisition function; acquiring with
        wait=False reaps stale locks as a side effect, which makes the
        report more accurate.
        """
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # we acquired it, so nobody else held it; release immediately
            l.release()
        else:
            try:
                stat = vfs.lstat(name)
                age = now - stat.st_mtime
                user = util.username(stat.st_uid)
                locker = vfs.readlock(name)
                # lock contents are "host:pid"; pretty-print the holder,
                # omitting the host when it is the local machine
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user, pid)
                    else:
                        locker = 'user %s, process %s, host %s' \
                                 % (user, pid, host)
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                # ENOENT means the lock vanished between the failed
                # acquisition and our stat: treat it as free
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    # number of locks currently held (0 == success per command contract)
    return held
1168 1168
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        # render the null hash as the literal string 'null'
        if h == nullhex:
            return 'null'
        else:
            return h

    def printrecords(version):
        # pretty-print the records of one merge-state format version
        # (reads v1records/v2records from the enclosing scope)
        ui.write(('* version %s records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                # per-file merge records; fields are NUL-separated
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                # v1 lacks the "other node" field; v2 stores it at index 7
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write((' local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write((' ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write((' other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                # extras are stored as alternating key/value entries
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                # merge labels: local, other, and an optional base
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write((' local: %s\n' % labels[0]))
                ui.write((' other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write((' base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        # known record types sort first (in 'LOml' order), everything else
        # after, ordered by payload
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
        if ui.verbose:
            printrecords(2)
1267 1267
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    candidates = set()
    # the 'branches' namespace is handled separately below so that only
    # open branches are offered (matching historical behavior)
    for nsname, ns in repo.names.iteritems():
        if nsname == 'branches':
            continue
        candidates.update(ns.listnames(repo))
    for tag, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            candidates.add(tag)

    # no argument means "complete everything" (empty prefix)
    prefixes = args or ['']
    matches = set()
    for prefix in prefixes:
        matches.update(name for name in candidates if name.startswith(prefix))
    ui.write('\n'.join(sorted(matches)))
    ui.write('\n')
1287 1287
@command('debugobsolete',
        [('', 'flags', 0, _('markers flag')),
         ('', 'record-parents', False,
          _('record parent information for the precursor')),
         ('r', 'rev', [], _('display markers relevant to REV')),
         ('', 'index', False, _('display index of the marker')),
         ('', 'delete', [], _('delete markers specified by indices')),
        ] + cmdutil.commitopts2 + cmdutil.formatteropts,
         _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    def parsenodeid(s):
        # convert a full hex node id to binary, aborting on bad input
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    # deletion mode: --delete takes marker indices and returns early
    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        # creation mode: record a marker precursor -> successors
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = opts['user'] or ui.username()
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = util.parsedate(date)
                else:
                    # None lets obsstore.create pick the default date; ui
                    # is passed along so the devel default-date config can
                    # be honored
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    # parents can only be recorded for changesets we have
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot used --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata, ui=ui)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') % exc)
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # display mode
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsolete.getmarkers(repo, nodes=nodes))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsolete.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            markerstoiter = obsolete.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
1398 1398
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        # return (files, dirs) completions for 'path', restricted to
        # dirstate entries whose status character is in 'acceptable'
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        # anything outside the repository cannot be completed
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        # dirstate paths always use '/'; translate on non-POSIX platforms
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                # without --full, stop at the next path separator and
                # offer the directory instead
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # build the set of acceptable dirstate states from the options; an
    # empty string means "no filter requested" and defaults to 'nmar' below
    acceptable = ''
    if opts['normal']:
        acceptable += 'nm'
    if opts['added']:
        acceptable += 'a'
    if opts['removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')
1463 1463
@command('debugpickmergetool',
        [('r', 'rev', '', _('check for files in this revision'), _('REV')),
         ('', 'changedelete', None, _('emulate merging change and delete')),
        ] + cmdutil.walkopts + cmdutil.mergetoolopts,
        _('[PATTERN]...'),
        inferrepo=True)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    # apply --tool via a temporary config override so _picktool sees it
    overrides = {}
    if opts['tool']:
        overrides[('ui', 'forcemerge')] = opts['tool']
        ui.note(('with --tool %r\n') % (opts['tool']))

    with ui.configoverride(overrides, 'debugmergepatterns'):
        hgmerge = encoding.environ.get("HGMERGE")
        if hgmerge is not None:
            ui.note(('with HGMERGE=%r\n') % (hgmerge))
        uimerge = ui.config("ui", "merge")
        if uimerge:
            ui.note(('with ui.merge=%r\n') % (uimerge))

        ctx = scmutil.revsingle(repo, opts.get('rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts['changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                # buffer _picktool's chatter unless --debug was given
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(repo, ui, path,
                                                     fctx.isbinary(),
                                                     'l' in fctx.flags(),
                                                     changedelete)
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(('%s = %s\n') % (path, tool))
1541 1541
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    peer = hg.peer(ui, {}, repopath)
    if not keyinfo:
        # listing mode: dump every key/value pair in the namespace
        for key, value in sorted(peer.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (util.escapestr(key),
                                   util.escapestr(value)))
        return
    # update mode: conditional set, exit status reflects success
    key, old, new = keyinfo
    result = peer.pushkey(namespace, key, old, new)
    ui.status(str(result) + '\n')
    return not result
1562 1562
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    """display pvec comparison data for two revisions (debugging aid)"""
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    # NOTE(review): if none of the four comparisons above holds, 'rel'
    # stays unbound and the write below raises NameError — confirm that
    # pvec guarantees exactly one of ==, >, <, | for any pair
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))
1583 1583
@command('debugrebuilddirstate|debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                             'the working copy parent')),
    ],
    _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        ds = repo.dirstate
        # None tells rebuild() to reset every file; --minimal narrows the
        # set to only the inconsistent entries (see command doc)
        changedfiles = None
        if opts.get('minimal'):
            inmanifest = set(ctx.manifest().keys())
            indirstate = set(ds)
            # files present in the manifest but unknown to the dirstate
            missing = inmanifest - indirstate
            # dirstate-only files, excluding those marked as added
            stray = set(f for f in indirstate - inmanifest if ds[f] != 'a')
            changedfiles = missing | stray
        ds.rebuild(ctx.node(), ctx.manifest(), changedfiles)
1621 1621
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # delegates entirely to the repair module
    repair.rebuildfncache(ui, repo)
1626 1626
@command('debugrename',
    [('r', 'rev', '', _('revision to debug'), _('REV'))],
    _('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, (file1,) + pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        # renamed() yields (source path, source node) or a false value
        copysrc = fctx.filelog().renamed(fctx.filenode())
        relpath = matcher.rel(path)
        if not copysrc:
            ui.write(_("%s not renamed\n") % relpath)
        else:
            ui.write(_("%s renamed from %s:%s\n")
                     % (relpath, copysrc[0], hex(copysrc[1])))
1643 1643
1644 1644 @command('debugrevlog', cmdutil.debugrevlogopts +
1645 1645 [('d', 'dump', False, _('dump index data'))],
1646 1646 _('-c|-m|FILE'),
1647 1647 optionalrepo=True)
1648 1648 def debugrevlog(ui, repo, file_=None, **opts):
1649 1649 """show data and statistics about a revlog"""
1650 1650 r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
1651 1651
1652 1652 if opts.get("dump"):
1653 1653 numrevs = len(r)
1654 1654 ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
1655 1655 " rawsize totalsize compression heads chainlen\n"))
1656 1656 ts = 0
1657 1657 heads = set()
1658 1658
1659 1659 for rev in xrange(numrevs):
1660 1660 dbase = r.deltaparent(rev)
1661 1661 if dbase == -1:
1662 1662 dbase = rev
1663 1663 cbase = r.chainbase(rev)
1664 1664 clen = r.chainlen(rev)
1665 1665 p1, p2 = r.parentrevs(rev)
1666 1666 rs = r.rawsize(rev)
1667 1667 ts = ts + rs
1668 1668 heads -= set(r.parentrevs(rev))
1669 1669 heads.add(rev)
1670 1670 try:
1671 1671 compression = ts / r.end(rev)
1672 1672 except ZeroDivisionError:
1673 1673 compression = 0
1674 1674 ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
1675 1675 "%11d %5d %8d\n" %
1676 1676 (rev, p1, p2, r.start(rev), r.end(rev),
1677 1677 r.start(dbase), r.start(cbase),
1678 1678 r.start(p1), r.start(p2),
1679 1679 rs, ts, compression, len(heads), clen))
1680 1680 return 0
1681 1681
1682 1682 v = r.version
1683 1683 format = v & 0xFFFF
1684 1684 flags = []
1685 1685 gdelta = False
1686 1686 if v & revlog.FLAG_INLINE_DATA:
1687 1687 flags.append('inline')
1688 1688 if v & revlog.FLAG_GENERALDELTA:
1689 1689 gdelta = True
1690 1690 flags.append('generaldelta')
1691 1691 if not flags:
1692 1692 flags = ['(none)']
1693 1693
1694 1694 nummerges = 0
1695 1695 numfull = 0
1696 1696 numprev = 0
1697 1697 nump1 = 0
1698 1698 nump2 = 0
1699 1699 numother = 0
1700 1700 nump1prev = 0
1701 1701 nump2prev = 0
1702 1702 chainlengths = []
1703 1703
1704 1704 datasize = [None, 0, 0]
1705 1705 fullsize = [None, 0, 0]
1706 1706 deltasize = [None, 0, 0]
1707 1707 chunktypecounts = {}
1708 1708 chunktypesizes = {}
1709 1709
1710 1710 def addsize(size, l):
1711 1711 if l[0] is None or size < l[0]:
1712 1712 l[0] = size
1713 1713 if size > l[1]:
1714 1714 l[1] = size
1715 1715 l[2] += size
1716 1716
1717 1717 numrevs = len(r)
1718 1718 for rev in xrange(numrevs):
1719 1719 p1, p2 = r.parentrevs(rev)
1720 1720 delta = r.deltaparent(rev)
1721 1721 if format > 0:
1722 1722 addsize(r.rawsize(rev), datasize)
1723 1723 if p2 != nullrev:
1724 1724 nummerges += 1
1725 1725 size = r.length(rev)
1726 1726 if delta == nullrev:
1727 1727 chainlengths.append(0)
1728 1728 numfull += 1
1729 1729 addsize(size, fullsize)
1730 1730 else:
1731 1731 chainlengths.append(chainlengths[delta] + 1)
1732 1732 addsize(size, deltasize)
1733 1733 if delta == rev - 1:
1734 1734 numprev += 1
1735 1735 if delta == p1:
1736 1736 nump1prev += 1
1737 1737 elif delta == p2:
1738 1738 nump2prev += 1
1739 1739 elif delta == p1:
1740 1740 nump1 += 1
1741 1741 elif delta == p2:
1742 1742 nump2 += 1
1743 1743 elif delta != nullrev:
1744 1744 numother += 1
1745 1745
1746 1746 # Obtain data on the raw chunks in the revlog.
1747 1747 segment = r._getsegmentforrevs(rev, rev)[1]
1748 1748 if segment:
1749 1749 chunktype = segment[0]
1750 1750 else:
1751 1751 chunktype = 'empty'
1752 1752
1753 1753 if chunktype not in chunktypecounts:
1754 1754 chunktypecounts[chunktype] = 0
1755 1755 chunktypesizes[chunktype] = 0
1756 1756
1757 1757 chunktypecounts[chunktype] += 1
1758 1758 chunktypesizes[chunktype] += size
1759 1759
1760 1760 # Adjust size min value for empty cases
1761 1761 for size in (datasize, fullsize, deltasize):
1762 1762 if size[0] is None:
1763 1763 size[0] = 0
1764 1764
1765 1765 numdeltas = numrevs - numfull
1766 1766 numoprev = numprev - nump1prev - nump2prev
1767 1767 totalrawsize = datasize[2]
1768 1768 datasize[2] /= numrevs
1769 1769 fulltotal = fullsize[2]
1770 1770 fullsize[2] /= numfull
1771 1771 deltatotal = deltasize[2]
1772 1772 if numrevs - numfull > 0:
1773 1773 deltasize[2] /= numrevs - numfull
1774 1774 totalsize = fulltotal + deltatotal
1775 1775 avgchainlen = sum(chainlengths) / numrevs
1776 1776 maxchainlen = max(chainlengths)
1777 1777 compratio = 1
1778 1778 if totalsize:
1779 1779 compratio = totalrawsize / totalsize
1780 1780
1781 1781 basedfmtstr = '%%%dd\n'
1782 1782 basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
1783 1783
1784 1784 def dfmtstr(max):
1785 1785 return basedfmtstr % len(str(max))
1786 1786 def pcfmtstr(max, padding=0):
1787 1787 return basepcfmtstr % (len(str(max)), ' ' * padding)
1788 1788
1789 1789 def pcfmt(value, total):
1790 1790 if total:
1791 1791 return (value, 100 * float(value) / total)
1792 1792 else:
1793 1793 return value, 100.0
1794 1794
1795 1795 ui.write(('format : %d\n') % format)
1796 1796 ui.write(('flags : %s\n') % ', '.join(flags))
1797 1797
1798 1798 ui.write('\n')
1799 1799 fmt = pcfmtstr(totalsize)
1800 1800 fmt2 = dfmtstr(totalsize)
1801 1801 ui.write(('revisions : ') + fmt2 % numrevs)
1802 1802 ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
1803 1803 ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
1804 1804 ui.write(('revisions : ') + fmt2 % numrevs)
1805 1805 ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
1806 1806 ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
1807 1807 ui.write(('revision size : ') + fmt2 % totalsize)
1808 1808 ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
1809 1809 ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))
1810 1810
1811 1811 def fmtchunktype(chunktype):
1812 1812 if chunktype == 'empty':
1813 1813 return ' %s : ' % chunktype
1814 1814 elif chunktype in string.ascii_letters:
1815 1815 return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
1816 1816 else:
1817 1817 return ' 0x%s : ' % hex(chunktype)
1818 1818
1819 1819 ui.write('\n')
1820 1820 ui.write(('chunks : ') + fmt2 % numrevs)
1821 1821 for chunktype in sorted(chunktypecounts):
1822 1822 ui.write(fmtchunktype(chunktype))
1823 1823 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
1824 1824 ui.write(('chunks size : ') + fmt2 % totalsize)
1825 1825 for chunktype in sorted(chunktypecounts):
1826 1826 ui.write(fmtchunktype(chunktype))
1827 1827 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
1828 1828
1829 1829 ui.write('\n')
1830 1830 fmt = dfmtstr(max(avgchainlen, compratio))
1831 1831 ui.write(('avg chain length : ') + fmt % avgchainlen)
1832 1832 ui.write(('max chain length : ') + fmt % maxchainlen)
1833 1833 ui.write(('compression ratio : ') + fmt % compratio)
1834 1834
1835 1835 if format > 0:
1836 1836 ui.write('\n')
1837 1837 ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
1838 1838 % tuple(datasize))
1839 1839 ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
1840 1840 % tuple(fullsize))
1841 1841 ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
1842 1842 % tuple(deltasize))
1843 1843
1844 1844 if numdeltas > 0:
1845 1845 ui.write('\n')
1846 1846 fmt = pcfmtstr(numdeltas)
1847 1847 fmt2 = pcfmtstr(numdeltas, 4)
1848 1848 ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
1849 1849 if numprev > 0:
1850 1850 ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
1851 1851 numprev))
1852 1852 ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
1853 1853 numprev))
1854 1854 ui.write((' other : ') + fmt2 % pcfmt(numoprev,
1855 1855 numprev))
1856 1856 if gdelta:
1857 1857 ui.write(('deltas against p1 : ')
1858 1858 + fmt % pcfmt(nump1, numdeltas))
1859 1859 ui.write(('deltas against p2 : ')
1860 1860 + fmt % pcfmt(nump2, numdeltas))
1861 1861 ui.write(('deltas against other : ') + fmt % pcfmt(numother,
1862 1862 numdeltas))
1863 1863
@command('debugrevspec',
    [('', 'optimize', None,
      _('print parsed tree after optimizing (DEPRECATED)')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME')),
     ('', 'no-optimized', False, _('evaluate tree without optimization')),
     ('', 'verify-optimized', False, _('verify optimized result')),
     ],
    ('REVSPEC'))
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    # transformation pipeline: each stage maps a parse tree to a new tree
    stages = [
        ('parsed', lambda tree: tree),
        ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)),
        ('concatenated', revsetlang.foldconcat),
        ('analyzed', revsetlang.analyze),
        ('optimized', revsetlang.optimize),
    ]
    if opts['no_optimized']:
        # drop the final 'optimized' stage
        stages = stages[:-1]
    if opts['verify_optimized'] and opts['no_optimized']:
        raise error.Abort(_('cannot use --verify-optimized with '
                            '--no-optimized'))
    stagenames = set(n for n, f in stages)

    # decide which stage trees get printed: 'showalways' stages are always
    # printed, 'showchanged' stages only when their tree differs from the
    # previously printed one
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
        showchanged.update(['expanded', 'concatenated'])
    if opts['optimize']:
        showalways.add('optimized')
    if opts['show_stage'] and opts['optimize']:
        raise error.Abort(_('cannot use --optimize with --show-stage'))
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    # run the pipeline, keeping every intermediate tree for --verify-optimized
    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=repo.__contains__)
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(revsetlang.prettyformat(tree), "\n")
            printedtree = tree

    if opts['verify_optimized']:
        # evaluate both the analyzed and the optimized trees and diff the
        # resulting revision sequences; any difference is an optimizer bug
        arevs = revset.makematcher(treebystage['analyzed'])(repo)
        brevs = revset.makematcher(treebystage['optimized'])(repo)
        if ui.verbose:
            ui.note(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
            ui.note(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        # print a unified-diff-style comparison of the two sequences
        ui.write(('--- analyzed\n'), label='diff.file_a')
        ui.write(('+++ optimized\n'), label='diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write('-%s\n' % c, label='diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write('+%s\n' % c, label='diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(' %s\n' % c)
        return 1

    # normal path: evaluate the final tree and print the matched revisions
    func = revset.makematcher(tree)
    revs = func(repo)
    if ui.verbose:
        ui.note(("* set:\n"), smartset.prettyformat(revs), "\n")
    for c in revs:
        ui.write("%s\n" % c)
1956 1956
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """
    # resolve both revisions up front; the second defaults to the null rev
    parent1 = scmutil.revsingle(repo, rev1).node()
    parent2 = scmutil.revsingle(repo, rev2, 'null').node()
    with repo.wlock():
        repo.setparents(parent1, parent2)
1974 1974
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    # show the substate (path, source, revision) of each subrepository
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        source, revision = state[0], state[1]
        ui.write(('path %s\n') % path)
        ui.write((' source %s\n') % source)
        ui.write((' revision %s\n') % revision)
1985 1985
@command('debugsuccessorssets',
    [],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    # full hashes in debug mode, short forms otherwise
    if ui.debug():
        ctx2str = lambda ctx: ctx.hex()
        node2str = hex
    else:
        ctx2str = str
        node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n'% ctx2str(ctx))
        for succsset in obsolete.successorssets(repo, ctx.node(), cache):
            # each set is printed indented, space-separated, on its own line
            ui.write(''.join(' ' + node2str(n) for n in succsset))
            ui.write('\n')
2039 2039
@command('debugtemplate',
    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        # the command itself is optionalrepo, but --rev needs a repository
        if repo is None:
            raise error.RepoError(_('there is no Mercurial repository here '
                                    '(.hg not found)'))
        revs = scmutil.revrange(repo, opts['rev'])

    # collect -D KEY=VALUE definitions as template properties
    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split('=', 1))
            if not k or k == 'ui':
                # empty keys and 'ui' (passed explicitly below) are rejected
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_('malformed keyword definition: %s') % d)

    if ui.verbose:
        # print the parsed tree, and the alias-expanded tree if it differs
        aliases = ui.configitems('templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), '\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')

    mapfile = None
    if revs is None:
        # generic template: render once with the user-supplied properties
        k = 'debugtemplate'
        t = formatter.maketemplater(ui, k, tmpl)
        ui.write(templater.stringify(t(k, ui=ui, **props)))
    else:
        # log template: render for each requested changeset
        displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl,
                                                mapfile, buffered=False)
        for r in revs:
            displayer.show(repo[r], **props)
        displayer.close()
2090 2090
@command('debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # hold both the wlock and the store lock while refreshing caches
    with repo.wlock(), repo.lock():
        repo.updatecaches()
2097 2097
@command('debugupgraderepo', [
    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
    ('', 'run', False, _('performs an upgrade')),
])
def debugupgraderepo(ui, repo, run=False, optimize=None):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.
    """
    # all of the actual work is delegated to the upgrade module
    return upgrade.upgraderepo(ui, repo, optimize=optimize, run=run)
2122 2122
@command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
         inferrepo=True)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    matcher = scmutil.match(repo[None], pats, opts)
    paths = list(repo[None].walk(matcher))
    if not paths:
        return
    # optionally rewrite native path separators to '/'
    normalize = lambda fn: fn
    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
        normalize = lambda fn: util.normpath(fn)
    # column widths are sized to the longest absolute and relative paths
    widthabs = max([len(p) for p in paths])
    widthrel = max([len(matcher.rel(p)) for p in paths])
    fmt = 'f %%-%ds %%-%ds %%s' % (widthabs, widthrel)
    for p in paths:
        exact = 'exact' if matcher.exact(p) else ''
        line = fmt % (p, normalize(matcher.rel(p)), exact)
        ui.write("%s\n" % line.rstrip())
2140 2140
@command('debugwireargs',
    [('', 'three', '', 'three'),
    ('', 'four', '', 'four'),
    ('', 'five', '', 'five'),
    ] + cmdutil.remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True)
def debugwireargs(ui, repopath, *vals, **opts):
    peer = hg.peer(ui, opts, repopath)
    # strip the generic remote options; only command-specific ones remain
    for remoteopt in cmdutil.remoteopts:
        del opts[remoteopt[1]]
    # keep only the options that were actually set
    args = dict((k, v) for k, v in opts.iteritems() if v)
    # run twice to check that we don't mess up the stream for the next command
    res1 = peer.debugwireargs(*vals, **args)
    res2 = peer.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)
@@ -1,1293 +1,1298 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depend of the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 phases,
80 80 policy,
81 81 util,
82 82 )
83 83
84 84 parsers = policy.importmod(r'parsers')
85 85
86 86 _pack = struct.pack
87 87 _unpack = struct.unpack
88 88 _calcsize = struct.calcsize
89 89 propertycache = util.propertycache
90 90
91 91 # the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
93 93 _enabled = False
94 94
95 95 # Options for obsolescence
96 96 createmarkersopt = 'createmarkers'
97 97 allowunstableopt = 'allowunstable'
98 98 exchangeopt = 'exchange'
99 99
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    Raises error.Abort when dependent options ('allowunstable', 'exchange')
    are enabled without 'createmarkers'.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't been
    # set but _enabled is true.
    if not result and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
120 120
121 121 ### obsolescence marker flag
122 122
123 123 ## bumpedfix flag
124 124 #
125 125 # When a changeset A' succeed to a changeset A which became public, we call A'
126 126 # "bumped" because it's a successors of a public changesets
127 127 #
128 128 # o A' (bumped)
129 129 # |`:
130 130 # | o A
131 131 # |/
132 132 # o Z
133 133 #
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
137 137 #
138 138 # o Ad
139 139 # |`:
140 140 # | x A'
141 141 # |'|
142 142 # o | A
143 143 # |/
144 144 # o Z
145 145 #
146 146 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
147 147 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
148 148 # This flag mean that the successors express the changes between the public and
149 149 # bumped version and fix the situation, breaking the transitivity of
150 150 # "bumped" here.
151 151 bumpedfix = 1
152 152 usingsha256 = 2
153 153
154 154 ## Parsing and writing of version "0"
155 155 #
156 156 # The header is followed by the markers. Each marker is made of:
157 157 #
158 158 # - 1 uint8 : number of new changesets "N", can be zero.
159 159 #
160 160 # - 1 uint32: metadata size "M" in bytes.
161 161 #
162 162 # - 1 byte: a bit field. It is reserved for flags used in common
163 163 # obsolete marker operations, to avoid repeated decoding of metadata
164 164 # entries.
165 165 #
166 166 # - 20 bytes: obsoleted changeset identifier.
167 167 #
168 168 # - N*20 bytes: new changesets identifiers.
169 169 #
170 170 # - M bytes: metadata as a sequence of nul-terminated strings. Each
171 171 # string contains a key and a value, separated by a colon ':', without
172 172 # additional encoding. Keys cannot contain '\0' or ':' and values
173 173 # cannot contain '\0'.
_fm0version = 0
# fixed part of a v0 marker: uint8 successor count, uint32 metadata size,
# uint8 flags, 20-byte obsoleted changeset id (see format comment above)
_fm0fixed = '>BIB20s'
_fm0node = '20s'  # one sha1 changeset identifier
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
179 179
def _fm0readmarkers(data, off):
    """Yield version-0 markers decoded from ``data`` starting at ``off``.

    Each yielded tuple is (precursor, successors, flags, metadata, date,
    parents). In v0, date and parents are stored inside the metadata
    dictionary and are extracted here.
    """
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            # v0 keeps the date in metadata as "<seconds> <tz offset>"
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            # 'p0' marks "parents recorded, but there are none"
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
231 231
def _fm0encodeonemarker(marker):
    """Return the version-0 binary encoding of one marker tuple.

    Date and parents have no dedicated fields in v0, so they are folded
    into the metadata dictionary. sha256 nodes cannot be represented.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
251 251
252 252 def _fm0encodemeta(meta):
253 253 """Return encoded metadata string to string mapping.
254 254
255 255 Assume no ':' in key and no '\0' in both key and value."""
256 256 for key, value in meta.iteritems():
257 257 if ':' in key or '\0' in key:
258 258 raise ValueError("':' and '\0' are forbidden in metadata key'")
259 259 if '\0' in value:
260 260 raise ValueError("':' is forbidden in metadata value'")
261 261 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
262 262
263 263 def _fm0decodemeta(data):
264 264 """Return string to string dictionary from encoded version."""
265 265 d = {}
266 266 for l in data.split('\0'):
267 267 if l:
268 268 key, value = l.split(':')
269 269 d[key] = value
270 270 return d
271 271
272 272 ## Parsing and writing of version "1"
273 273 #
274 274 # The header is followed by the markers. Each marker is made of:
275 275 #
276 276 # - uint32: total size of the marker (including this field)
277 277 #
278 278 # - float64: date in seconds since epoch
279 279 #
280 280 # - int16: timezone offset in minutes
281 281 #
282 282 # - uint16: a bit field. It is reserved for flags used in common
283 283 # obsolete marker operations, to avoid repeated decoding of metadata
284 284 # entries.
285 285 #
286 286 # - uint8: number of successors "N", can be zero.
287 287 #
288 288 # - uint8: number of parents "P", can be zero.
289 289 #
290 290 # 0: parents data stored but no parent,
291 291 # 1: one parent stored,
292 292 # 2: two parents stored,
293 293 # 3: no parent data stored
294 294 #
295 295 # - uint8: number of metadata entries M
296 296 #
297 297 # - 20 or 32 bytes: precursor changeset identifier.
298 298 #
299 299 # - N*(20 or 32) bytes: successors changesets identifiers.
300 300 #
301 301 # - P*(20 or 32) bytes: parents of the precursors changesets.
302 302 #
303 303 # - M*(uint8, uint8): size of all metadata entries (key and value)
304 304 #
305 305 # - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
# fixed part of a v1 marker: uint32 total size, float64 date, int16 tz
# (minutes), uint16 flags, uint8 successor count, uint8 parent count,
# uint8 metadata count, 20-byte precursor id (see format comment above)
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3  # parent-count value meaning "no parent data stored"
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'  # (key length, value length) per metadata entry
_fm1metapairsize = _calcsize('BB')
318 318
def _fm1purereadmarkers(data, off):
    """Pure-Python decoder for version-1 markers (fallback when no native
    ``parsers.fm1readmarkers`` is available; see _fm1readmarkers).

    Yields (precursor, successors, flags, metadata, date, parents) tuples.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        # the sha256 branch mirrors the sha1 branch below with wider nodes
        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                # no parent data stored at all
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (key length, value length) pairs, then
        # the key/value bytes themselves
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz is stored in minutes on disk; in-memory markers use seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
393 393
def _fm1encodeonemarker(marker):
    """Return the version-1 binary encoding of one marker tuple."""
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        # _fm1parentnone encodes "no parent data stored"
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is the total-size field, filled in once it is known
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # the raw key/value bytes follow the packed fixed-size portion
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
429 429
430 430 def _fm1readmarkers(data, off):
431 431 native = getattr(parsers, 'fm1readmarkers', None)
432 432 if not native:
433 433 return _fm1purereadmarkers(data, off)
434 434 stop = len(data) - _fm1fsize
435 435 return native(data, off, stop)
436 436
437 437 # mapping to read/write various marker formats
438 438 # <version> -> (decoder, encoder)
439 439 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
440 440 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
441 441
442 442 @util.nogc
443 443 def _readmarkers(data):
444 444 """Read and enumerate markers from raw data"""
445 445 off = 0
446 446 diskversion = _unpack('>B', data[off:off + 1])[0]
447 447 off += 1
448 448 if diskversion not in formats:
449 449 raise error.Abort(_('parsing obsolete marker: unknown version %r')
450 450 % diskversion)
451 451 return diskversion, formats[diskversion][0](data, off)
452 452
453 453 def encodemarkers(markers, addheader=False, version=_fm0version):
454 454 # Kept separate from flushmarkers(), it will be reused for
455 455 # markers exchange.
456 456 encodeone = formats[version][1]
457 457 if addheader:
458 458 yield _pack('>B', version)
459 459 for marker in markers:
460 460 yield encodeone(marker)
461 461
462 462
463 463 class marker(object):
464 464 """Wrap obsolete marker raw data"""
465 465
466 466 def __init__(self, repo, data):
467 467 # the repo argument will be used to create changectx in later version
468 468 self._repo = repo
469 469 self._data = data
470 470 self._decodedmeta = None
471 471
472 472 def __hash__(self):
473 473 return hash(self._data)
474 474
475 475 def __eq__(self, other):
476 476 if type(other) != type(self):
477 477 return False
478 478 return self._data == other._data
479 479
480 480 def precnode(self):
481 481 """Precursor changeset node identifier"""
482 482 return self._data[0]
483 483
484 484 def succnodes(self):
485 485 """List of successor changesets node identifiers"""
486 486 return self._data[1]
487 487
488 488 def parentnodes(self):
489 489 """Parents of the precursors (None if not recorded)"""
490 490 return self._data[5]
491 491
492 492 def metadata(self):
493 493 """Decoded metadata dictionary"""
494 494 return dict(self._data[3])
495 495
496 496 def date(self):
497 497 """Creation date as (unixtime, offset)"""
498 498 return self._data[4]
499 499
500 500 def flags(self):
501 501 """The flags field of the marker"""
502 502 return self._data[2]
503 503
504 504 @util.nogc
505 505 def _addsuccessors(successors, markers):
506 506 for mark in markers:
507 507 successors.setdefault(mark[0], set()).add(mark)
508 508
509 509 @util.nogc
510 510 def _addprecursors(precursors, markers):
511 511 for mark in markers:
512 512 for suc in mark[1]:
513 513 precursors.setdefault(suc, set()).add(mark)
514 514
515 515 @util.nogc
516 516 def _addchildren(children, markers):
517 517 for mark in markers:
518 518 parents = mark[5]
519 519 if parents is not None:
520 520 for p in parents:
521 521 children.setdefault(p, set()).add(mark)
522 522
523 523 def _checkinvalidmarkers(markers):
524 524 """search for marker with invalid data and raise error if needed
525 525
526 526 Exist as a separated function to allow the evolve extension for a more
527 527 subtle handling.
528 528 """
529 529 for mark in markers:
530 530 if node.nullid in mark[1]:
531 531 raise error.Abort(_('bad obsolescence marker detected: '
532 532 'invalid successors nullid'))
533 533
534 534 class obsstore(object):
535 535 """Store obsolete markers
536 536
537 537 Markers can be accessed with two mappings:
538 538 - precursors[x] -> set(markers on precursors edges of x)
539 539 - successors[x] -> set(markers on successors edges of x)
540 540 - children[x] -> set(markers on precursors edges of children(x)
541 541 """
542 542
543 543 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
544 544 # prec: nodeid, precursor changesets
545 545 # succs: tuple of nodeid, successor changesets (0-N length)
546 546 # flag: integer, flag field carrying modifier for the markers (see doc)
547 547 # meta: binary blob, encoded metadata dictionary
548 548 # date: (float, int) tuple, date of marker creation
549 549 # parents: (tuple of nodeid) or None, parents of precursors
550 550 # None is used when no data has been recorded
551 551
552 552 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
553 553 # caches for various obsolescence related cache
554 554 self.caches = {}
555 555 self.svfs = svfs
556 556 self._version = defaultformat
557 557 self._readonly = readonly
558 558
559 559 def __iter__(self):
560 560 return iter(self._all)
561 561
562 562 def __len__(self):
563 563 return len(self._all)
564 564
565 565 def __nonzero__(self):
566 566 if not self._cached('_all'):
567 567 try:
568 568 return self.svfs.stat('obsstore').st_size > 1
569 569 except OSError as inst:
570 570 if inst.errno != errno.ENOENT:
571 571 raise
572 572 # just build an empty _all list if no obsstore exists, which
573 573 # avoids further stat() syscalls
574 574 pass
575 575 return bool(self._all)
576 576
577 577 __bool__ = __nonzero__
578 578
579 579 @property
580 580 def readonly(self):
581 581 """True if marker creation is disabled
582 582
583 583 Remove me in the future when obsolete marker is always on."""
584 584 return self._readonly
585 585
586 586 def create(self, transaction, prec, succs=(), flag=0, parents=None,
587 date=None, metadata=None):
587 date=None, metadata=None, ui=None):
588 588 """obsolete: add a new obsolete marker
589 589
590 590 * ensuring it is hashable
591 591 * check mandatory metadata
592 592 * encode metadata
593 593
594 594 If you are a human writing code creating marker you want to use the
595 595 `createmarkers` function in this module instead.
596 596
597 597 return True if a new marker have been added, False if the markers
598 598 already existed (no op).
599 599 """
600 600 if metadata is None:
601 601 metadata = {}
602 602 if date is None:
603 603 if 'date' in metadata:
604 604 # as a courtesy for out-of-tree extensions
605 605 date = util.parsedate(metadata.pop('date'))
606 elif ui is not None:
607 date = ui.configdate('devel', 'default-date')
608 if date is None:
609 date = util.makedate()
606 610 else:
607 611 date = util.makedate()
608 612 if len(prec) != 20:
609 613 raise ValueError(prec)
610 614 for succ in succs:
611 615 if len(succ) != 20:
612 616 raise ValueError(succ)
613 617 if prec in succs:
614 618 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
615 619
616 620 metadata = tuple(sorted(metadata.iteritems()))
617 621
618 622 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
619 623 return bool(self.add(transaction, [marker]))
620 624
621 625 def add(self, transaction, markers):
622 626 """Add new markers to the store
623 627
624 628 Take care of filtering duplicate.
625 629 Return the number of new marker."""
626 630 if self._readonly:
627 631 raise error.Abort(_('creating obsolete markers is not enabled on '
628 632 'this repo'))
629 633 known = set(self._all)
630 634 new = []
631 635 for m in markers:
632 636 if m not in known:
633 637 known.add(m)
634 638 new.append(m)
635 639 if new:
636 640 f = self.svfs('obsstore', 'ab')
637 641 try:
638 642 offset = f.tell()
639 643 transaction.add('obsstore', offset)
640 644 # offset == 0: new file - add the version header
641 645 for bytes in encodemarkers(new, offset == 0, self._version):
642 646 f.write(bytes)
643 647 finally:
644 648 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
645 649 # call 'filecacheentry.refresh()' here
646 650 f.close()
647 651 self._addmarkers(new)
648 652 # new marker *may* have changed several set. invalidate the cache.
649 653 self.caches.clear()
650 654 # records the number of new markers for the transaction hooks
651 655 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
652 656 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
653 657 return len(new)
654 658
655 659 def mergemarkers(self, transaction, data):
656 660 """merge a binary stream of markers inside the obsstore
657 661
658 662 Returns the number of new markers added."""
659 663 version, markers = _readmarkers(data)
660 664 return self.add(transaction, markers)
661 665
662 666 @propertycache
663 667 def _all(self):
664 668 data = self.svfs.tryread('obsstore')
665 669 if not data:
666 670 return []
667 671 self._version, markers = _readmarkers(data)
668 672 markers = list(markers)
669 673 _checkinvalidmarkers(markers)
670 674 return markers
671 675
672 676 @propertycache
673 677 def successors(self):
674 678 successors = {}
675 679 _addsuccessors(successors, self._all)
676 680 return successors
677 681
678 682 @propertycache
679 683 def precursors(self):
680 684 precursors = {}
681 685 _addprecursors(precursors, self._all)
682 686 return precursors
683 687
684 688 @propertycache
685 689 def children(self):
686 690 children = {}
687 691 _addchildren(children, self._all)
688 692 return children
689 693
690 694 def _cached(self, attr):
691 695 return attr in self.__dict__
692 696
693 697 def _addmarkers(self, markers):
694 698 markers = list(markers) # to allow repeated iteration
695 699 self._all.extend(markers)
696 700 if self._cached('successors'):
697 701 _addsuccessors(self.successors, markers)
698 702 if self._cached('precursors'):
699 703 _addprecursors(self.precursors, markers)
700 704 if self._cached('children'):
701 705 _addchildren(self.children, markers)
702 706 _checkinvalidmarkers(markers)
703 707
704 708 def relevantmarkers(self, nodes):
705 709 """return a set of all obsolescence markers relevant to a set of nodes.
706 710
707 711 "relevant" to a set of nodes mean:
708 712
709 713 - marker that use this changeset as successor
710 714 - prune marker of direct children on this changeset
711 715 - recursive application of the two rules on precursors of these markers
712 716
713 717 It is a set so you cannot rely on order."""
714 718
715 719 pendingnodes = set(nodes)
716 720 seenmarkers = set()
717 721 seennodes = set(pendingnodes)
718 722 precursorsmarkers = self.precursors
719 723 children = self.children
720 724 while pendingnodes:
721 725 direct = set()
722 726 for current in pendingnodes:
723 727 direct.update(precursorsmarkers.get(current, ()))
724 728 pruned = [m for m in children.get(current, ()) if not m[1]]
725 729 direct.update(pruned)
726 730 direct -= seenmarkers
727 731 pendingnodes = set([m[0] for m in direct])
728 732 seenmarkers |= direct
729 733 pendingnodes -= seennodes
730 734 seennodes |= pendingnodes
731 735 return seenmarkers
732 736
733 737 def commonversion(versions):
734 738 """Return the newest version listed in both versions and our local formats.
735 739
736 740 Returns None if no common version exists.
737 741 """
738 742 versions.sort(reverse=True)
739 743 # search for highest version known on both side
740 744 for v in versions:
741 745 if v in formats:
742 746 return v
743 747 return None
744 748
745 749 # arbitrary picked to fit into 8K limit from HTTP server
746 750 # you have to take in account:
747 751 # - the version header
748 752 # - the base85 encoding
749 753 _maxpayload = 5300
750 754
751 755 def _pushkeyescape(markers):
752 756 """encode markers into a dict suitable for pushkey exchange
753 757
754 758 - binary data is base85 encoded
755 759 - split in chunks smaller than 5300 bytes"""
756 760 keys = {}
757 761 parts = []
758 762 currentlen = _maxpayload * 2 # ensure we create a new part
759 763 for marker in markers:
760 764 nextdata = _fm0encodeonemarker(marker)
761 765 if (len(nextdata) + currentlen > _maxpayload):
762 766 currentpart = []
763 767 currentlen = 0
764 768 parts.append(currentpart)
765 769 currentpart.append(nextdata)
766 770 currentlen += len(nextdata)
767 771 for idx, part in enumerate(reversed(parts)):
768 772 data = ''.join([_pack('>B', _fm0version)] + part)
769 773 keys['dump%i' % idx] = util.b85encode(data)
770 774 return keys
771 775
772 776 def listmarkers(repo):
773 777 """List markers over pushkey"""
774 778 if not repo.obsstore:
775 779 return {}
776 780 return _pushkeyescape(sorted(repo.obsstore))
777 781
778 782 def pushmarker(repo, key, old, new):
779 783 """Push markers over pushkey"""
780 784 if not key.startswith('dump'):
781 785 repo.ui.warn(_('unknown key: %r') % key)
782 786 return 0
783 787 if old:
784 788 repo.ui.warn(_('unexpected old value for %r') % key)
785 789 return 0
786 790 data = util.b85decode(new)
787 791 lock = repo.lock()
788 792 try:
789 793 tr = repo.transaction('pushkey: obsolete markers')
790 794 try:
791 795 repo.obsstore.mergemarkers(tr, data)
792 796 repo.invalidatevolatilesets()
793 797 tr.close()
794 798 return 1
795 799 finally:
796 800 tr.release()
797 801 finally:
798 802 lock.release()
799 803
800 804 def getmarkers(repo, nodes=None):
801 805 """returns markers known in a repository
802 806
803 807 If <nodes> is specified, only markers "relevant" to those nodes are are
804 808 returned"""
805 809 if nodes is None:
806 810 rawmarkers = repo.obsstore
807 811 else:
808 812 rawmarkers = repo.obsstore.relevantmarkers(nodes)
809 813
810 814 for markerdata in rawmarkers:
811 815 yield marker(repo, markerdata)
812 816
813 817 def relevantmarkers(repo, node):
814 818 """all obsolete markers relevant to some revision"""
815 819 for markerdata in repo.obsstore.relevantmarkers(node):
816 820 yield marker(repo, markerdata)
817 821
818 822
819 823 def precursormarkers(ctx):
820 824 """obsolete marker marking this changeset as a successors"""
821 825 for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
822 826 yield marker(ctx.repo(), data)
823 827
824 828 def successormarkers(ctx):
825 829 """obsolete marker making this changeset obsolete"""
826 830 for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
827 831 yield marker(ctx.repo(), data)
828 832
829 833 def allsuccessors(obsstore, nodes, ignoreflags=0):
830 834 """Yield node for every successor of <nodes>.
831 835
832 836 Some successors may be unknown locally.
833 837
834 838 This is a linear yield unsuited to detecting split changesets. It includes
835 839 initial nodes too."""
836 840 remaining = set(nodes)
837 841 seen = set(remaining)
838 842 while remaining:
839 843 current = remaining.pop()
840 844 yield current
841 845 for mark in obsstore.successors.get(current, ()):
842 846 # ignore marker flagged with specified flag
843 847 if mark[2] & ignoreflags:
844 848 continue
845 849 for suc in mark[1]:
846 850 if suc not in seen:
847 851 seen.add(suc)
848 852 remaining.add(suc)
849 853
850 854 def allprecursors(obsstore, nodes, ignoreflags=0):
851 855 """Yield node for every precursors of <nodes>.
852 856
853 857 Some precursors may be unknown locally.
854 858
855 859 This is a linear yield unsuited to detecting folded changesets. It includes
856 860 initial nodes too."""
857 861
858 862 remaining = set(nodes)
859 863 seen = set(remaining)
860 864 while remaining:
861 865 current = remaining.pop()
862 866 yield current
863 867 for mark in obsstore.precursors.get(current, ()):
864 868 # ignore marker flagged with specified flag
865 869 if mark[2] & ignoreflags:
866 870 continue
867 871 suc = mark[0]
868 872 if suc not in seen:
869 873 seen.add(suc)
870 874 remaining.add(suc)
871 875
872 876 def foreground(repo, nodes):
873 877 """return all nodes in the "foreground" of other node
874 878
875 879 The foreground of a revision is anything reachable using parent -> children
876 880 or precursor -> successor relation. It is very similar to "descendant" but
877 881 augmented with obsolescence information.
878 882
879 883 Beware that possible obsolescence cycle may result if complex situation.
880 884 """
881 885 repo = repo.unfiltered()
882 886 foreground = set(repo.set('%ln::', nodes))
883 887 if repo.obsstore:
884 888 # We only need this complicated logic if there is obsolescence
885 889 # XXX will probably deserve an optimised revset.
886 890 nm = repo.changelog.nodemap
887 891 plen = -1
888 892 # compute the whole set of successors or descendants
889 893 while len(foreground) != plen:
890 894 plen = len(foreground)
891 895 succs = set(c.node() for c in foreground)
892 896 mutable = [c.node() for c in foreground if c.mutable()]
893 897 succs.update(allsuccessors(repo.obsstore, mutable))
894 898 known = (n for n in succs if n in nm)
895 899 foreground = set(repo.set('%ln::', known))
896 900 return set(c.node() for c in foreground)
897 901
898 902
899 903 def successorssets(repo, initialnode, cache=None):
900 904 """Return set of all latest successors of initial nodes
901 905
902 906 The successors set of a changeset A are the group of revisions that succeed
903 907 A. It succeeds A as a consistent whole, each revision being only a partial
904 908 replacement. The successors set contains non-obsolete changesets only.
905 909
906 910 This function returns the full list of successor sets which is why it
907 911 returns a list of tuples and not just a single tuple. Each tuple is a valid
908 912 successors set. Note that (A,) may be a valid successors set for changeset A
909 913 (see below).
910 914
911 915 In most cases, a changeset A will have a single element (e.g. the changeset
912 916 A is replaced by A') in its successors set. Though, it is also common for a
913 917 changeset A to have no elements in its successor set (e.g. the changeset
914 918 has been pruned). Therefore, the returned list of successors sets will be
915 919 [(A',)] or [], respectively.
916 920
917 921 When a changeset A is split into A' and B', however, it will result in a
918 922 successors set containing more than a single element, i.e. [(A',B')].
919 923 Divergent changesets will result in multiple successors sets, i.e. [(A',),
920 924 (A'')].
921 925
922 926 If a changeset A is not obsolete, then it will conceptually have no
923 927 successors set. To distinguish this from a pruned changeset, the successor
924 928 set will contain itself only, i.e. [(A,)].
925 929
926 930 Finally, successors unknown locally are considered to be pruned (obsoleted
927 931 without any successors).
928 932
929 933 The optional `cache` parameter is a dictionary that may contain precomputed
930 934 successors sets. It is meant to reuse the computation of a previous call to
931 935 `successorssets` when multiple calls are made at the same time. The cache
932 936 dictionary is updated in place. The caller is responsible for its life
933 937 span. Code that makes multiple calls to `successorssets` *must* use this
934 938 cache mechanism or suffer terrible performance.
935 939 """
936 940
937 941 succmarkers = repo.obsstore.successors
938 942
939 943 # Stack of nodes we search successors sets for
940 944 toproceed = [initialnode]
941 945 # set version of above list for fast loop detection
942 946 # element added to "toproceed" must be added here
943 947 stackedset = set(toproceed)
944 948 if cache is None:
945 949 cache = {}
946 950
947 951 # This while loop is the flattened version of a recursive search for
948 952 # successors sets
949 953 #
950 954 # def successorssets(x):
951 955 # successors = directsuccessors(x)
952 956 # ss = [[]]
953 957 # for succ in directsuccessors(x):
954 958 # # product as in itertools cartesian product
955 959 # ss = product(ss, successorssets(succ))
956 960 # return ss
957 961 #
958 962 # But we can not use plain recursive calls here:
959 963 # - that would blow the python call stack
960 964 # - obsolescence markers may have cycles, we need to handle them.
961 965 #
962 966 # The `toproceed` list act as our call stack. Every node we search
963 967 # successors set for are stacked there.
964 968 #
965 969 # The `stackedset` is set version of this stack used to check if a node is
966 970 # already stacked. This check is used to detect cycles and prevent infinite
967 971 # loop.
968 972 #
969 973 # successors set of all nodes are stored in the `cache` dictionary.
970 974 #
971 975 # After this while loop ends we use the cache to return the successors sets
972 976 # for the node requested by the caller.
973 977 while toproceed:
974 978 # Every iteration tries to compute the successors sets of the topmost
975 979 # node of the stack: CURRENT.
976 980 #
977 981 # There are four possible outcomes:
978 982 #
979 983 # 1) We already know the successors sets of CURRENT:
980 984 # -> mission accomplished, pop it from the stack.
981 985 # 2) Node is not obsolete:
982 986 # -> the node is its own successors sets. Add it to the cache.
983 987 # 3) We do not know successors set of direct successors of CURRENT:
984 988 # -> We add those successors to the stack.
985 989 # 4) We know successors sets of all direct successors of CURRENT:
986 990 # -> We can compute CURRENT successors set and add it to the
987 991 # cache.
988 992 #
989 993 current = toproceed[-1]
990 994 if current in cache:
991 995 # case (1): We already know the successors sets
992 996 stackedset.remove(toproceed.pop())
993 997 elif current not in succmarkers:
994 998 # case (2): The node is not obsolete.
995 999 if current in repo:
996 1000 # We have a valid last successors.
997 1001 cache[current] = [(current,)]
998 1002 else:
999 1003 # Final obsolete version is unknown locally.
1000 1004 # Do not count that as a valid successors
1001 1005 cache[current] = []
1002 1006 else:
1003 1007 # cases (3) and (4)
1004 1008 #
1005 1009 # We proceed in two phases. Phase 1 aims to distinguish case (3)
1006 1010 # from case (4):
1007 1011 #
1008 1012 # For each direct successors of CURRENT, we check whether its
1009 1013 # successors sets are known. If they are not, we stack the
1010 1014 # unknown node and proceed to the next iteration of the while
1011 1015 # loop. (case 3)
1012 1016 #
1013 1017 # During this step, we may detect obsolescence cycles: a node
1014 1018 # with unknown successors sets but already in the call stack.
1015 1019 # In such a situation, we arbitrary set the successors sets of
1016 1020 # the node to nothing (node pruned) to break the cycle.
1017 1021 #
1018 1022 # If no break was encountered we proceed to phase 2.
1019 1023 #
1020 1024 # Phase 2 computes successors sets of CURRENT (case 4); see details
1021 1025 # in phase 2 itself.
1022 1026 #
1023 1027 # Note the two levels of iteration in each phase.
1024 1028 # - The first one handles obsolescence markers using CURRENT as
1025 1029 # precursor (successors markers of CURRENT).
1026 1030 #
1027 1031 # Having multiple entry here means divergence.
1028 1032 #
1029 1033 # - The second one handles successors defined in each marker.
1030 1034 #
1031 1035 # Having none means pruned node, multiple successors means split,
1032 1036 # single successors are standard replacement.
1033 1037 #
1034 1038 for mark in sorted(succmarkers[current]):
1035 1039 for suc in mark[1]:
1036 1040 if suc not in cache:
1037 1041 if suc in stackedset:
1038 1042 # cycle breaking
1039 1043 cache[suc] = []
1040 1044 else:
1041 1045 # case (3) If we have not computed successors sets
1042 1046 # of one of those successors we add it to the
1043 1047 # `toproceed` stack and stop all work for this
1044 1048 # iteration.
1045 1049 toproceed.append(suc)
1046 1050 stackedset.add(suc)
1047 1051 break
1048 1052 else:
1049 1053 continue
1050 1054 break
1051 1055 else:
1052 1056 # case (4): we know all successors sets of all direct
1053 1057 # successors
1054 1058 #
1055 1059 # Successors set contributed by each marker depends on the
1056 1060 # successors sets of all its "successors" node.
1057 1061 #
1058 1062 # Each different marker is a divergence in the obsolescence
1059 1063 # history. It contributes successors sets distinct from other
1060 1064 # markers.
1061 1065 #
1062 1066 # Within a marker, a successor may have divergent successors
1063 1067 # sets. In such a case, the marker will contribute multiple
1064 1068 # divergent successors sets. If multiple successors have
1065 1069 # divergent successors sets, a Cartesian product is used.
1066 1070 #
1067 1071 # At the end we post-process successors sets to remove
1068 1072 # duplicated entry and successors set that are strict subset of
1069 1073 # another one.
1070 1074 succssets = []
1071 1075 for mark in sorted(succmarkers[current]):
1072 1076 # successors sets contributed by this marker
1073 1077 markss = [[]]
1074 1078 for suc in mark[1]:
1075 1079 # cardinal product with previous successors
1076 1080 productresult = []
1077 1081 for prefix in markss:
1078 1082 for suffix in cache[suc]:
1079 1083 newss = list(prefix)
1080 1084 for part in suffix:
1081 1085 # do not duplicated entry in successors set
1082 1086 # first entry wins.
1083 1087 if part not in newss:
1084 1088 newss.append(part)
1085 1089 productresult.append(newss)
1086 1090 markss = productresult
1087 1091 succssets.extend(markss)
1088 1092 # remove duplicated and subset
1089 1093 seen = []
1090 1094 final = []
1091 1095 candidate = sorted(((set(s), s) for s in succssets if s),
1092 1096 key=lambda x: len(x[1]), reverse=True)
1093 1097 for setversion, listversion in candidate:
1094 1098 for seenset in seen:
1095 1099 if setversion.issubset(seenset):
1096 1100 break
1097 1101 else:
1098 1102 final.append(listversion)
1099 1103 seen.append(setversion)
1100 1104 final.reverse() # put small successors set first
1101 1105 cache[current] = final
1102 1106 return cache[initialnode]
1103 1107
1104 1108 # mapping of 'set-name' -> <function to compute this set>
1105 1109 cachefuncs = {}
1106 1110 def cachefor(name):
1107 1111 """Decorator to register a function as computing the cache for a set"""
1108 1112 def decorator(func):
1109 1113 assert name not in cachefuncs
1110 1114 cachefuncs[name] = func
1111 1115 return func
1112 1116 return decorator
1113 1117
1114 1118 def getrevs(repo, name):
1115 1119 """Return the set of revision that belong to the <name> set
1116 1120
1117 1121 Such access may compute the set and cache it for future use"""
1118 1122 repo = repo.unfiltered()
1119 1123 if not repo.obsstore:
1120 1124 return frozenset()
1121 1125 if name not in repo.obsstore.caches:
1122 1126 repo.obsstore.caches[name] = cachefuncs[name](repo)
1123 1127 return repo.obsstore.caches[name]
1124 1128
1125 1129 # To be simple we need to invalidate obsolescence cache when:
1126 1130 #
1127 1131 # - new changeset is added:
1128 1132 # - public phase is changed
1129 1133 # - obsolescence marker are added
1130 1134 # - strip is used a repo
1131 1135 def clearobscaches(repo):
1132 1136 """Remove all obsolescence related cache from a repo
1133 1137
1134 1138 This remove all cache in obsstore is the obsstore already exist on the
1135 1139 repo.
1136 1140
1137 1141 (We could be smarter here given the exact event that trigger the cache
1138 1142 clearing)"""
1139 1143 # only clear cache is there is obsstore data in this repo
1140 1144 if 'obsstore' in repo._filecache:
1141 1145 repo.obsstore.caches.clear()
1142 1146
1143 1147 @cachefor('obsolete')
1144 1148 def _computeobsoleteset(repo):
1145 1149 """the set of obsolete revisions"""
1146 1150 obs = set()
1147 1151 getnode = repo.changelog.node
1148 1152 notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
1149 1153 for r in notpublic:
1150 1154 if getnode(r) in repo.obsstore.successors:
1151 1155 obs.add(r)
1152 1156 return obs
1153 1157
1154 1158 @cachefor('unstable')
1155 1159 def _computeunstableset(repo):
1156 1160 """the set of non obsolete revisions with obsolete parents"""
1157 1161 revs = [(ctx.rev(), ctx) for ctx in
1158 1162 repo.set('(not public()) and (not obsolete())')]
1159 1163 revs.sort(key=lambda x:x[0])
1160 1164 unstable = set()
1161 1165 for rev, ctx in revs:
1162 1166 # A rev is unstable if one of its parent is obsolete or unstable
1163 1167 # this works since we traverse following growing rev order
1164 1168 if any((x.obsolete() or (x.rev() in unstable))
1165 1169 for x in ctx.parents()):
1166 1170 unstable.add(rev)
1167 1171 return unstable
1168 1172
1169 1173 @cachefor('suspended')
1170 1174 def _computesuspendedset(repo):
1171 1175 """the set of obsolete parents with non obsolete descendants"""
1172 1176 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
1173 1177 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
1174 1178
1175 1179 @cachefor('extinct')
1176 1180 def _computeextinctset(repo):
1177 1181 """the set of obsolete parents without non obsolete descendants"""
1178 1182 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
1179 1183
1180 1184
1181 1185 @cachefor('bumped')
1182 1186 def _computebumpedset(repo):
1183 1187 """the set of revs trying to obsolete public revisions"""
1184 1188 bumped = set()
1185 1189 # util function (avoid attribute lookup in the loop)
1186 1190 phase = repo._phasecache.phase # would be faster to grab the full list
1187 1191 public = phases.public
1188 1192 cl = repo.changelog
1189 1193 torev = cl.nodemap.get
1190 1194 for ctx in repo.set('(not public()) and (not obsolete())'):
1191 1195 rev = ctx.rev()
1192 1196 # We only evaluate mutable, non-obsolete revision
1193 1197 node = ctx.node()
1194 1198 # (future) A cache of precursors may worth if split is very common
1195 1199 for pnode in allprecursors(repo.obsstore, [node],
1196 1200 ignoreflags=bumpedfix):
1197 1201 prev = torev(pnode) # unfiltered! but so is phasecache
1198 1202 if (prev is not None) and (phase(repo, prev) <= public):
1199 1203 # we have a public precursor
1200 1204 bumped.add(rev)
1201 1205 break # Next draft!
1202 1206 return bumped
1203 1207
1204 1208 @cachefor('divergent')
1205 1209 def _computedivergentset(repo):
1206 1210 """the set of rev that compete to be the final successors of some revision.
1207 1211 """
1208 1212 divergent = set()
1209 1213 obsstore = repo.obsstore
1210 1214 newermap = {}
1211 1215 for ctx in repo.set('(not public()) - obsolete()'):
1212 1216 mark = obsstore.precursors.get(ctx.node(), ())
1213 1217 toprocess = set(mark)
1214 1218 seen = set()
1215 1219 while toprocess:
1216 1220 prec = toprocess.pop()[0]
1217 1221 if prec in seen:
1218 1222 continue # emergency cycle hanging prevention
1219 1223 seen.add(prec)
1220 1224 if prec not in newermap:
1221 1225 successorssets(repo, prec, newermap)
1222 1226 newer = [n for n in newermap[prec] if n]
1223 1227 if len(newer) > 1:
1224 1228 divergent.add(ctx.rev())
1225 1229 break
1226 1230 toprocess.update(obsstore.precursors.get(prec, ()))
1227 1231 return divergent
1228 1232
1229 1233
1230 1234 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
1231 1235 operation=None):
1232 1236 """Add obsolete markers between changesets in a repo
1233 1237
1234 1238 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1235 1239 tuple. `old` and `news` are changectx. metadata is an optional dictionary
1236 1240 containing metadata for this marker only. It is merged with the global
1237 1241 metadata specified through the `metadata` argument of this function,
1238 1242
1239 1243 Trying to obsolete a public changeset will raise an exception.
1240 1244
1241 1245 Current user and date are used except if specified otherwise in the
1242 1246 metadata attribute.
1243 1247
1244 1248 This function operates within a transaction of its own, but does
1245 1249 not take any lock on the repo.
1246 1250 """
1247 1251 # prepare metadata
1248 1252 if metadata is None:
1249 1253 metadata = {}
1250 1254 if 'user' not in metadata:
1251 1255 metadata['user'] = repo.ui.username()
1252 1256 useoperation = repo.ui.configbool('experimental',
1253 1257 'evolution.track-operation',
1254 1258 False)
1255 1259 if useoperation and operation:
1256 1260 metadata['operation'] = operation
1257 1261 tr = repo.transaction('add-obsolescence-marker')
1258 1262 try:
1259 1263 markerargs = []
1260 1264 for rel in relations:
1261 1265 prec = rel[0]
1262 1266 sucs = rel[1]
1263 1267 localmetadata = metadata.copy()
1264 1268 if 2 < len(rel):
1265 1269 localmetadata.update(rel[2])
1266 1270
1267 1271 if not prec.mutable():
1268 1272 raise error.Abort(_("cannot obsolete public changeset: %s")
1269 1273 % prec,
1270 1274 hint="see 'hg help phases' for details")
1271 1275 nprec = prec.node()
1272 1276 nsucs = tuple(s.node() for s in sucs)
1273 1277 npare = None
1274 1278 if not nsucs:
1275 1279 npare = tuple(p.node() for p in prec.parents())
1276 1280 if nprec in nsucs:
1277 1281 raise error.Abort(_("changeset %s cannot obsolete itself")
1278 1282 % prec)
1279 1283
1280 1284 # Creating the marker causes the hidden cache to become invalid,
1281 1285 # which causes recomputation when we ask for prec.parents() above.
1282 1286 # Resulting in n^2 behavior. So let's prepare all of the args
1283 1287 # first, then create the markers.
1284 1288 markerargs.append((nprec, nsucs, npare, localmetadata))
1285 1289
1286 1290 for args in markerargs:
1287 1291 nprec, nsucs, npare, localmetadata = args
1288 1292 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1289 date=date, metadata=localmetadata)
1293 date=date, metadata=localmetadata,
1294 ui=repo.ui)
1290 1295 repo.filteredrevcache.clear()
1291 1296 tr.close()
1292 1297 finally:
1293 1298 tr.release()
@@ -1,1315 +1,1315 b''
1 1 $ cat >> $HGRCPATH << EOF
2 2 > [phases]
3 3 > # public changeset are not obsolete
4 4 > publish=false
5 5 > [ui]
6 6 > logtemplate="{rev}:{node|short} ({phase}{if(obsolete, ' *{obsolete}*')}{if(troubles, ' {troubles}')}) [{tags} {bookmarks}] {desc|firstline}\n"
7 7 > EOF
8 8 $ mkcommit() {
9 9 > echo "$1" > "$1"
10 10 > hg add "$1"
11 11 > hg ci -m "add $1"
12 12 > }
13 13 $ getid() {
14 14 > hg log -T "{node}\n" --hidden -r "desc('$1')"
15 15 > }
16 16
17 17 $ cat > debugkeys.py <<EOF
18 18 > def reposetup(ui, repo):
19 19 > class debugkeysrepo(repo.__class__):
20 20 > def listkeys(self, namespace):
21 21 > ui.write('listkeys %s\n' % (namespace,))
22 22 > return super(debugkeysrepo, self).listkeys(namespace)
23 23 >
24 24 > if repo.local():
25 25 > repo.__class__ = debugkeysrepo
26 26 > EOF
27 27
28 28 $ hg init tmpa
29 29 $ cd tmpa
30 30 $ mkcommit kill_me
31 31
32 32 Checking that the feature is properly disabled
33 33
34 34 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
35 35 abort: creating obsolete markers is not enabled on this repo
36 36 [255]
37 37
38 38 Enabling it
39 39
40 40 $ cat >> $HGRCPATH << EOF
41 41 > [experimental]
42 42 > evolution=createmarkers,exchange
43 43 > EOF
44 44
45 45 Killing a single changeset without replacement
46 46
47 47 $ hg debugobsolete 0
48 48 abort: changeset references must be full hexadecimal node identifiers
49 49 [255]
50 50 $ hg debugobsolete '00'
51 51 abort: changeset references must be full hexadecimal node identifiers
52 52 [255]
53 53 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
54 54 $ hg debugobsolete
55 55 97b7c2d76b1845ed3eb988cd612611e72406cef0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'babar'}
56 56
57 57 (test that mercurial is not confused)
58 58
59 59 $ hg up null --quiet # having 0 as parent prevents it to be hidden
60 60 $ hg tip
61 61 -1:000000000000 (public) [tip ]
62 62 $ hg up --hidden tip --quiet
63 63
64 64 Killing a single changeset with itself should fail
65 65 (simple local safeguard)
66 66
67 67 $ hg debugobsolete `getid kill_me` `getid kill_me`
68 68 abort: bad obsmarker input: in-marker cycle with 97b7c2d76b1845ed3eb988cd612611e72406cef0
69 69 [255]
70 70
71 71 $ cd ..
72 72
73 73 Killing a single changeset with replacement
74 74 (and testing the format option)
75 75
76 76 $ hg init tmpb
77 77 $ cd tmpb
78 78 $ mkcommit a
79 79 $ mkcommit b
80 80 $ mkcommit original_c
81 81 $ hg up "desc('b')"
82 82 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
83 83 $ mkcommit new_c
84 84 created new head
85 85 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
86 86 $ hg debugobsolete --config format.obsstore-version=0 --flag 12 `getid original_c` `getid new_c` -d '121 120'
87 87 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
88 88 2:245bde4270cd add original_c
89 89 $ hg debugrevlog -cd
90 90 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
91 91 0 -1 -1 0 59 0 0 0 0 58 58 0 1 0
92 92 1 0 -1 59 118 59 59 0 0 58 116 0 1 0
93 93 2 1 -1 118 193 118 118 59 0 76 192 0 1 0
94 94 3 1 -1 193 260 193 193 59 0 66 258 0 2 0
95 95 $ hg debugobsolete
96 96 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
97 97
98 98 (check for version number of the obsstore)
99 99
100 100 $ dd bs=1 count=1 if=.hg/store/obsstore 2>/dev/null
101 101 \x00 (no-eol) (esc)
102 102
103 103 do it again (it reads the obsstore before adding a new changeset)
104 104
105 105 $ hg up '.^'
106 106 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
107 107 $ mkcommit new_2_c
108 108 created new head
109 109 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
110 110 $ hg debugobsolete
111 111 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
112 112 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
113 113
114 114 Register two markers with a missing node
115 115
116 116 $ hg up '.^'
117 117 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
118 118 $ mkcommit new_3_c
119 119 created new head
120 120 $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
121 121 $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
122 122 $ hg debugobsolete
123 123 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
124 124 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
125 125 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
126 126 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
127 127
128 128 Test the --index option of debugobsolete command
129 129 $ hg debugobsolete --index
130 130 0 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
131 131 1 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
132 132 2 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
133 133 3 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
134 134
135 135 Refuse pathological nullid successors
136 136 $ hg debugobsolete -d '9001 0' 1337133713371337133713371337133713371337 0000000000000000000000000000000000000000
137 137 transaction abort!
138 138 rollback completed
139 139 abort: bad obsolescence marker detected: invalid successors nullid
140 140 [255]
141 141
142 142 Check that graphlog detect that a changeset is obsolete:
143 143
144 144 $ hg log -G
145 145 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
146 146 |
147 147 o 1:7c3bad9141dc (draft) [ ] add b
148 148 |
149 149 o 0:1f0dee641bb7 (draft) [ ] add a
150 150
151 151
152 152 check that heads does not report them
153 153
154 154 $ hg heads
155 155 5:5601fb93a350 (draft) [tip ] add new_3_c
156 156 $ hg heads --hidden
157 157 5:5601fb93a350 (draft) [tip ] add new_3_c
158 158 4:ca819180edb9 (draft *obsolete*) [ ] add new_2_c
159 159 3:cdbce2fbb163 (draft *obsolete*) [ ] add new_c
160 160 2:245bde4270cd (draft *obsolete*) [ ] add original_c
161 161
162 162
163 163 check that summary does not report them
164 164
165 165 $ hg init ../sink
166 166 $ echo '[paths]' >> .hg/hgrc
167 167 $ echo 'default=../sink' >> .hg/hgrc
168 168 $ hg summary --remote
169 169 parent: 5:5601fb93a350 tip
170 170 add new_3_c
171 171 branch: default
172 172 commit: (clean)
173 173 update: (current)
174 174 phases: 3 draft
175 175 remote: 3 outgoing
176 176
177 177 $ hg summary --remote --hidden
178 178 parent: 5:5601fb93a350 tip
179 179 add new_3_c
180 180 branch: default
181 181 commit: (clean)
182 182 update: 3 new changesets, 4 branch heads (merge)
183 183 phases: 6 draft
184 184 remote: 3 outgoing
185 185
186 186 check that various commands work well with filtering
187 187
188 188 $ hg tip
189 189 5:5601fb93a350 (draft) [tip ] add new_3_c
190 190 $ hg log -r 6
191 191 abort: unknown revision '6'!
192 192 [255]
193 193 $ hg log -r 4
194 194 abort: hidden revision '4'!
195 195 (use --hidden to access hidden revisions)
196 196 [255]
197 197 $ hg debugrevspec 'rev(6)'
198 198 $ hg debugrevspec 'rev(4)'
199 199 $ hg debugrevspec 'null'
200 200 -1
201 201
202 202 Check that public changesets are not accounted as obsolete:
203 203
204 204 $ hg --hidden phase --public 2
205 205 $ hg log -G
206 206 @ 5:5601fb93a350 (draft bumped) [tip ] add new_3_c
207 207 |
208 208 | o 2:245bde4270cd (public) [ ] add original_c
209 209 |/
210 210 o 1:7c3bad9141dc (public) [ ] add b
211 211 |
212 212 o 0:1f0dee641bb7 (public) [ ] add a
213 213
214 214
215 215 And that bumped changeset are detected
216 216 --------------------------------------
217 217
218 218 If we hadn't filtered obsolete changesets out, 3 and 4 would show up too. Also
219 219 note that the bumped changeset (5:5601fb93a350) is not a direct successor of
220 220 the public changeset
221 221
222 222 $ hg log --hidden -r 'bumped()'
223 223 5:5601fb93a350 (draft bumped) [tip ] add new_3_c
224 224
225 225 And that we can't push bumped changeset
226 226
227 227 $ hg push ../tmpa -r 0 --force #(make repo related)
228 228 pushing to ../tmpa
229 229 searching for changes
230 230 warning: repository is unrelated
231 231 adding changesets
232 232 adding manifests
233 233 adding file changes
234 234 added 1 changesets with 1 changes to 1 files (+1 heads)
235 235 $ hg push ../tmpa
236 236 pushing to ../tmpa
237 237 searching for changes
238 238 abort: push includes bumped changeset: 5601fb93a350!
239 239 [255]
240 240
241 241 Fixing "bumped" situation
242 242 We need to create a clone of 5 and add a special marker with a flag
243 243
244 244 $ hg summary
245 245 parent: 5:5601fb93a350 tip (bumped)
246 246 add new_3_c
247 247 branch: default
248 248 commit: (clean)
249 249 update: 1 new changesets, 2 branch heads (merge)
250 250 phases: 1 draft
251 251 bumped: 1 changesets
252 252 $ hg up '5^'
253 253 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
254 254 $ hg revert -ar 5
255 255 adding new_3_c
256 256 $ hg ci -m 'add n3w_3_c'
257 257 created new head
258 258 $ hg debugobsolete -d '1338 0' --flags 1 `getid new_3_c` `getid n3w_3_c`
259 259 $ hg log -r 'bumped()'
260 260 $ hg log -G
261 261 @ 6:6f9641995072 (draft) [tip ] add n3w_3_c
262 262 |
263 263 | o 2:245bde4270cd (public) [ ] add original_c
264 264 |/
265 265 o 1:7c3bad9141dc (public) [ ] add b
266 266 |
267 267 o 0:1f0dee641bb7 (public) [ ] add a
268 268
269 269
270 270 $ cd ..
271 271
272 272 Revision 0 is hidden
273 273 --------------------
274 274
275 275 $ hg init rev0hidden
276 276 $ cd rev0hidden
277 277
278 278 $ mkcommit kill0
279 279 $ hg up -q null
280 280 $ hg debugobsolete `getid kill0`
281 281 $ mkcommit a
282 282 $ mkcommit b
283 283
284 284 Should pick the first visible revision as "repo" node
285 285
286 286 $ hg archive ../archive-null
287 287 $ cat ../archive-null/.hg_archival.txt
288 288 repo: 1f0dee641bb7258c56bd60e93edfa2405381c41e
289 289 node: 7c3bad9141dcb46ff89abf5f61856facd56e476c
290 290 branch: default
291 291 latesttag: null
292 292 latesttagdistance: 2
293 293 changessincelatesttag: 2
294 294
295 295
296 296 $ cd ..
297 297
298 298 Exchange Test
299 299 ============================
300 300
301 301 Destination repo does not have any data
302 302 ---------------------------------------
303 303
304 304 Simple incoming test
305 305
306 306 $ hg init tmpc
307 307 $ cd tmpc
308 308 $ hg incoming ../tmpb
309 309 comparing with ../tmpb
310 310 0:1f0dee641bb7 (public) [ ] add a
311 311 1:7c3bad9141dc (public) [ ] add b
312 312 2:245bde4270cd (public) [ ] add original_c
313 313 6:6f9641995072 (draft) [tip ] add n3w_3_c
314 314
315 315 Try to pull markers
316 316 (extinct changesets are excluded but markers are pushed)
317 317
318 318 $ hg pull ../tmpb
319 319 pulling from ../tmpb
320 320 requesting all changes
321 321 adding changesets
322 322 adding manifests
323 323 adding file changes
324 324 added 4 changesets with 4 changes to 4 files (+1 heads)
325 325 5 new obsolescence markers
326 326 (run 'hg heads' to see heads, 'hg merge' to merge)
327 327 $ hg debugobsolete
328 328 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
329 329 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
330 330 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
331 331 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
332 332 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
333 333
334 334 Rollback//Transaction support
335 335
336 336 $ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
337 337 $ hg debugobsolete
338 338 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
339 339 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
340 340 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
341 341 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
342 342 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
343 343 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 (Thu Jan 01 00:22:20 1970 +0000) {'user': 'test'}
344 344 $ hg rollback -n
345 345 repository tip rolled back to revision 3 (undo debugobsolete)
346 346 $ hg rollback
347 347 repository tip rolled back to revision 3 (undo debugobsolete)
348 348 $ hg debugobsolete
349 349 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
350 350 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
351 351 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
352 352 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
353 353 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
354 354
355 355 $ cd ..
356 356
357 357 Try to push markers
358 358
359 359 $ hg init tmpd
360 360 $ hg -R tmpb push tmpd
361 361 pushing to tmpd
362 362 searching for changes
363 363 adding changesets
364 364 adding manifests
365 365 adding file changes
366 366 added 4 changesets with 4 changes to 4 files (+1 heads)
367 367 5 new obsolescence markers
368 368 $ hg -R tmpd debugobsolete | sort
369 369 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
370 370 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
371 371 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
372 372 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
373 373 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
374 374
375 375 Check obsolete keys are exchanged only if source has an obsolete store
376 376
377 377 $ hg init empty
378 378 $ hg --config extensions.debugkeys=debugkeys.py -R empty push tmpd
379 379 pushing to tmpd
380 380 listkeys phases
381 381 listkeys bookmarks
382 382 no changes found
383 383 listkeys phases
384 384 [1]
385 385
386 386 clone support
387 387 (markers are copied and extinct changesets are included to allow hardlinks)
388 388
389 389 $ hg clone tmpb clone-dest
390 390 updating to branch default
391 391 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
392 392 $ hg -R clone-dest log -G --hidden
393 393 @ 6:6f9641995072 (draft) [tip ] add n3w_3_c
394 394 |
395 395 | x 5:5601fb93a350 (draft *obsolete*) [ ] add new_3_c
396 396 |/
397 397 | x 4:ca819180edb9 (draft *obsolete*) [ ] add new_2_c
398 398 |/
399 399 | x 3:cdbce2fbb163 (draft *obsolete*) [ ] add new_c
400 400 |/
401 401 | o 2:245bde4270cd (public) [ ] add original_c
402 402 |/
403 403 o 1:7c3bad9141dc (public) [ ] add b
404 404 |
405 405 o 0:1f0dee641bb7 (public) [ ] add a
406 406
407 407 $ hg -R clone-dest debugobsolete
408 408 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
409 409 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
410 410 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
411 411 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
412 412 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
413 413
414 414
415 415 Destination repo have existing data
416 416 ---------------------------------------
417 417
418 418 On pull
419 419
420 420 $ hg init tmpe
421 421 $ cd tmpe
422 422 $ hg debugobsolete -d '1339 0' 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00
423 423 $ hg pull ../tmpb
424 424 pulling from ../tmpb
425 425 requesting all changes
426 426 adding changesets
427 427 adding manifests
428 428 adding file changes
429 429 added 4 changesets with 4 changes to 4 files (+1 heads)
430 430 5 new obsolescence markers
431 431 (run 'hg heads' to see heads, 'hg merge' to merge)
432 432 $ hg debugobsolete
433 433 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
434 434 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
435 435 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
436 436 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
437 437 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
438 438 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
439 439
440 440
441 441 On push
442 442
443 443 $ hg push ../tmpc
444 444 pushing to ../tmpc
445 445 searching for changes
446 446 no changes found
447 447 1 new obsolescence markers
448 448 [1]
449 449 $ hg -R ../tmpc debugobsolete
450 450 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
451 451 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
452 452 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
453 453 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
454 454 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
455 455 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
456 456
457 457 detect outgoing obsolete and unstable
458 458 ---------------------------------------
459 459
460 460
461 461 $ hg log -G
462 462 o 3:6f9641995072 (draft) [tip ] add n3w_3_c
463 463 |
464 464 | o 2:245bde4270cd (public) [ ] add original_c
465 465 |/
466 466 o 1:7c3bad9141dc (public) [ ] add b
467 467 |
468 468 o 0:1f0dee641bb7 (public) [ ] add a
469 469
470 470 $ hg up 'desc("n3w_3_c")'
471 471 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
472 472 $ mkcommit original_d
473 473 $ mkcommit original_e
474 474 $ hg debugobsolete --record-parents `getid original_d` -d '0 0'
475 475 $ hg debugobsolete | grep `getid original_d`
476 476 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
477 477 $ hg log -r 'obsolete()'
478 478 4:94b33453f93b (draft *obsolete*) [ ] add original_d
479 479 $ hg summary
480 480 parent: 5:cda648ca50f5 tip (unstable)
481 481 add original_e
482 482 branch: default
483 483 commit: (clean)
484 484 update: 1 new changesets, 2 branch heads (merge)
485 485 phases: 3 draft
486 486 unstable: 1 changesets
487 487 $ hg log -G -r '::unstable()'
488 488 @ 5:cda648ca50f5 (draft unstable) [tip ] add original_e
489 489 |
490 490 x 4:94b33453f93b (draft *obsolete*) [ ] add original_d
491 491 |
492 492 o 3:6f9641995072 (draft) [ ] add n3w_3_c
493 493 |
494 494 o 1:7c3bad9141dc (public) [ ] add b
495 495 |
496 496 o 0:1f0dee641bb7 (public) [ ] add a
497 497
498 498
499 499 refuse to push obsolete changeset
500 500
501 501 $ hg push ../tmpc/ -r 'desc("original_d")'
502 502 pushing to ../tmpc/
503 503 searching for changes
504 504 abort: push includes obsolete changeset: 94b33453f93b!
505 505 [255]
506 506
507 507 refuse to push unstable changeset
508 508
509 509 $ hg push ../tmpc/
510 510 pushing to ../tmpc/
511 511 searching for changes
512 512 abort: push includes unstable changeset: cda648ca50f5!
513 513 [255]
514 514
515 515 Test that extinct changeset are properly detected
516 516
517 517 $ hg log -r 'extinct()'
518 518
519 519 Don't try to push extinct changeset
520 520
521 521 $ hg init ../tmpf
522 522 $ hg out ../tmpf
523 523 comparing with ../tmpf
524 524 searching for changes
525 525 0:1f0dee641bb7 (public) [ ] add a
526 526 1:7c3bad9141dc (public) [ ] add b
527 527 2:245bde4270cd (public) [ ] add original_c
528 528 3:6f9641995072 (draft) [ ] add n3w_3_c
529 529 4:94b33453f93b (draft *obsolete*) [ ] add original_d
530 530 5:cda648ca50f5 (draft unstable) [tip ] add original_e
531 531 $ hg push ../tmpf -f # -f because we push unstable too
532 532 pushing to ../tmpf
533 533 searching for changes
534 534 adding changesets
535 535 adding manifests
536 536 adding file changes
537 537 added 6 changesets with 6 changes to 6 files (+1 heads)
538 538 7 new obsolescence markers
539 539
540 540 no warning displayed
541 541
542 542 $ hg push ../tmpf
543 543 pushing to ../tmpf
544 544 searching for changes
545 545 no changes found
546 546 [1]
547 547
548 548 Do not warn about new head when the new head is a successor of a remote one
549 549
550 550 $ hg log -G
551 551 @ 5:cda648ca50f5 (draft unstable) [tip ] add original_e
552 552 |
553 553 x 4:94b33453f93b (draft *obsolete*) [ ] add original_d
554 554 |
555 555 o 3:6f9641995072 (draft) [ ] add n3w_3_c
556 556 |
557 557 | o 2:245bde4270cd (public) [ ] add original_c
558 558 |/
559 559 o 1:7c3bad9141dc (public) [ ] add b
560 560 |
561 561 o 0:1f0dee641bb7 (public) [ ] add a
562 562
563 563 $ hg up -q 'desc(n3w_3_c)'
564 564 $ mkcommit obsolete_e
565 565 created new head
566 566 $ hg debugobsolete `getid 'original_e'` `getid 'obsolete_e'`
567 567 $ hg outgoing ../tmpf # parasite hg outgoing testing
568 568 comparing with ../tmpf
569 569 searching for changes
570 570 6:3de5eca88c00 (draft) [tip ] add obsolete_e
571 571 $ hg push ../tmpf
572 572 pushing to ../tmpf
573 573 searching for changes
574 574 adding changesets
575 575 adding manifests
576 576 adding file changes
577 577 added 1 changesets with 1 changes to 1 files (+1 heads)
578 578 1 new obsolescence markers
579 579
580 580 test relevance computation
581 581 ---------------------------------------
582 582
583 583 Checking simple case of "marker relevance".
584 584
585 585
586 586 Reminder of the repo situation
587 587
588 588 $ hg log --hidden --graph
589 589 @ 6:3de5eca88c00 (draft) [tip ] add obsolete_e
590 590 |
591 591 | x 5:cda648ca50f5 (draft *obsolete*) [ ] add original_e
592 592 | |
593 593 | x 4:94b33453f93b (draft *obsolete*) [ ] add original_d
594 594 |/
595 595 o 3:6f9641995072 (draft) [ ] add n3w_3_c
596 596 |
597 597 | o 2:245bde4270cd (public) [ ] add original_c
598 598 |/
599 599 o 1:7c3bad9141dc (public) [ ] add b
600 600 |
601 601 o 0:1f0dee641bb7 (public) [ ] add a
602 602
603 603
604 604 List of all markers
605 605
606 606 $ hg debugobsolete
607 607 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
608 608 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
609 609 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
610 610 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
611 611 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
612 612 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
613 613 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
614 614 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
615 615
616 616 List of changesets with no chain
617 617
618 618 $ hg debugobsolete --hidden --rev ::2
619 619
620 620 List of changesets that are included on marker chain
621 621
622 622 $ hg debugobsolete --hidden --rev 6
623 623 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
624 624
625 625 List of changesets with a longer chain, (including a pruned children)
626 626
627 627 $ hg debugobsolete --hidden --rev 3
628 628 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
629 629 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
630 630 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
631 631 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
632 632 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
633 633 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
634 634 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
635 635
636 636 List of both
637 637
638 638 $ hg debugobsolete --hidden --rev 3::6
639 639 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
640 640 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
641 641 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
642 642 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
643 643 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
644 644 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
645 645 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
646 646 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
647 647
648 648 List of all markers in JSON
649 649
650 650 $ hg debugobsolete -Tjson
651 651 [
652 652 {
653 653 "date": [1339.0, 0],
654 654 "flag": 0,
655 655 "metadata": {"user": "test"},
656 656 "precnode": "1339133913391339133913391339133913391339",
657 657 "succnodes": ["ca819180edb99ed25ceafb3e9584ac287e240b00"]
658 658 },
659 659 {
660 660 "date": [1339.0, 0],
661 661 "flag": 0,
662 662 "metadata": {"user": "test"},
663 663 "precnode": "1337133713371337133713371337133713371337",
664 664 "succnodes": ["5601fb93a350734d935195fee37f4054c529ff39"]
665 665 },
666 666 {
667 667 "date": [121.0, 120],
668 668 "flag": 12,
669 669 "metadata": {"user": "test"},
670 670 "precnode": "245bde4270cd1072a27757984f9cda8ba26f08ca",
671 671 "succnodes": ["cdbce2fbb16313928851e97e0d85413f3f7eb77f"]
672 672 },
673 673 {
674 674 "date": [1338.0, 0],
675 675 "flag": 1,
676 676 "metadata": {"user": "test"},
677 677 "precnode": "5601fb93a350734d935195fee37f4054c529ff39",
678 678 "succnodes": ["6f96419950729f3671185b847352890f074f7557"]
679 679 },
680 680 {
681 681 "date": [1338.0, 0],
682 682 "flag": 0,
683 683 "metadata": {"user": "test"},
684 684 "precnode": "ca819180edb99ed25ceafb3e9584ac287e240b00",
685 685 "succnodes": ["1337133713371337133713371337133713371337"]
686 686 },
687 687 {
688 688 "date": [1337.0, 0],
689 689 "flag": 0,
690 690 "metadata": {"user": "test"},
691 691 "precnode": "cdbce2fbb16313928851e97e0d85413f3f7eb77f",
692 692 "succnodes": ["ca819180edb99ed25ceafb3e9584ac287e240b00"]
693 693 },
694 694 {
695 695 "date": [0.0, 0],
696 696 "flag": 0,
697 697 "metadata": {"user": "test"},
698 698 "parentnodes": ["6f96419950729f3671185b847352890f074f7557"],
699 699 "precnode": "94b33453f93bdb8d457ef9b770851a618bf413e1",
700 700 "succnodes": []
701 701 },
702 702 {
703 703 "date": *, (glob)
704 704 "flag": 0,
705 705 "metadata": {"user": "test"},
706 706 "precnode": "cda648ca50f50482b7055c0b0c4c117bba6733d9",
707 707 "succnodes": ["3de5eca88c00aa039da7399a220f4a5221faa585"]
708 708 }
709 709 ]
710 710
711 711 Template keywords
712 712
713 713 $ hg debugobsolete -r6 -T '{succnodes % "{node|short}"} {date|shortdate}\n'
714 714 3de5eca88c00 ????-??-?? (glob)
715 715 $ hg debugobsolete -r6 -T '{join(metadata % "{key}={value}", " ")}\n'
716 716 user=test
717 717 $ hg debugobsolete -r6 -T '{metadata}\n'
718 718 'user': 'test'
719 719 $ hg debugobsolete -r6 -T '{flag} {get(metadata, "user")}\n'
720 720 0 test
721 721
722 722 Test the debug output for exchange
723 723 ----------------------------------
724 724
725 725 $ hg pull ../tmpb --config 'experimental.obsmarkers-exchange-debug=True' # bundle2
726 726 pulling from ../tmpb
727 727 searching for changes
728 728 no changes found
729 729 obsmarker-exchange: 346 bytes received
730 730
731 731 check hgweb does not explode
732 732 ====================================
733 733
734 734 $ hg unbundle $TESTDIR/bundles/hgweb+obs.hg
735 735 adding changesets
736 736 adding manifests
737 737 adding file changes
738 738 added 62 changesets with 63 changes to 9 files (+60 heads)
739 739 (run 'hg heads .' to see heads, 'hg merge' to merge)
740 740 $ for node in `hg log -r 'desc(babar_)' --template '{node}\n'`;
741 741 > do
742 742 > hg debugobsolete $node
743 743 > done
744 744 $ hg up tip
745 745 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
746 746
747 747 #if serve
748 748
749 749 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
750 750 $ cat hg.pid >> $DAEMON_PIDS
751 751
752 752 check changelog view
753 753
754 754 $ get-with-headers.py --headeronly localhost:$HGPORT 'shortlog/'
755 755 200 Script output follows
756 756
757 757 check graph view
758 758
759 759 $ get-with-headers.py --headeronly localhost:$HGPORT 'graph'
760 760 200 Script output follows
761 761
762 762 check filelog view
763 763
764 764 $ get-with-headers.py --headeronly localhost:$HGPORT 'log/'`hg log -r . -T "{node}"`/'babar'
765 765 200 Script output follows
766 766
767 767 $ get-with-headers.py --headeronly localhost:$HGPORT 'rev/68'
768 768 200 Script output follows
769 769 $ get-with-headers.py --headeronly localhost:$HGPORT 'rev/67'
770 770 404 Not Found
771 771 [1]
772 772
773 773 check that the web.view config option is honored:
774 774
775 775 $ killdaemons.py hg.pid
776 776 $ cat >> .hg/hgrc << EOF
777 777 > [web]
778 778 > view=all
779 779 > EOF
780 780 $ wait
781 781 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
782 782 $ get-with-headers.py --headeronly localhost:$HGPORT 'rev/67'
783 783 200 Script output follows
784 784 $ killdaemons.py hg.pid
785 785
786 786 Checking the _enable=False warning if obsolete markers exist
787 787
788 788 $ echo '[experimental]' >> $HGRCPATH
789 789 $ echo "evolution=" >> $HGRCPATH
790 790 $ hg log -r tip
791 791 obsolete feature not enabled but 68 markers found!
792 792 68:c15e9edfca13 (draft) [tip ] add celestine
793 793
794 794 reenable for later test
795 795
796 796 $ echo '[experimental]' >> $HGRCPATH
797 797 $ echo "evolution=createmarkers,exchange" >> $HGRCPATH
798 798
799 799 $ rm hg.pid access.log errors.log
800 800 #endif
801 801
802 802 Several troubles on the same changeset (create an unstable and bumped changeset)
803 803
804 804 $ hg debugobsolete `getid obsolete_e`
805 805 $ hg debugobsolete `getid original_c` `getid babar`
806 806 $ hg log --config ui.logtemplate= -r 'bumped() and unstable()'
807 807 changeset: 7:50c51b361e60
808 808 user: test
809 809 date: Thu Jan 01 00:00:00 1970 +0000
810 810 trouble: unstable, bumped
811 811 summary: add babar
812 812
813 813
814 814 test the "obsolete" templatekw
815 815
816 816 $ hg log -r 'obsolete()'
817 817 6:3de5eca88c00 (draft *obsolete*) [ ] add obsolete_e
818 818
819 819 test the "troubles" templatekw
820 820
821 821 $ hg log -r 'bumped() and unstable()'
822 822 7:50c51b361e60 (draft unstable bumped) [ ] add babar
823 823
824 824 test the default cmdline template
825 825
826 826 $ hg log -T default -r 'bumped()'
827 827 changeset: 7:50c51b361e60
828 828 user: test
829 829 date: Thu Jan 01 00:00:00 1970 +0000
830 830 trouble: unstable, bumped
831 831 summary: add babar
832 832
833 833 $ hg log -T default -r 'obsolete()'
834 834 changeset: 6:3de5eca88c00
835 835 parent: 3:6f9641995072
836 836 user: test
837 837 date: Thu Jan 01 00:00:00 1970 +0000
838 838 summary: add obsolete_e
839 839
840 840
841 841 test summary output
842 842
843 843 $ hg up -r 'bumped() and unstable()'
844 844 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
845 845 $ hg summary
846 846 parent: 7:50c51b361e60 (unstable, bumped)
847 847 add babar
848 848 branch: default
849 849 commit: (clean)
850 850 update: 2 new changesets (update)
851 851 phases: 4 draft
852 852 unstable: 2 changesets
853 853 bumped: 1 changesets
854 854 $ hg up -r 'obsolete()'
855 855 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
856 856 $ hg summary
857 857 parent: 6:3de5eca88c00 (obsolete)
858 858 add obsolete_e
859 859 branch: default
860 860 commit: (clean)
861 861 update: 3 new changesets (update)
862 862 phases: 4 draft
863 863 unstable: 2 changesets
864 864 bumped: 1 changesets
865 865
866 866 Test incoming/outgoing with changesets obsoleted remotely, known locally
867 867 ===============================================================================
868 868
869 869 This tests issue 3805
870 870
871 871 $ hg init repo-issue3805
872 872 $ cd repo-issue3805
873 873 $ echo "base" > base
874 874 $ hg ci -Am "base"
875 875 adding base
876 876 $ echo "foo" > foo
877 877 $ hg ci -Am "A"
878 878 adding foo
879 879 $ hg clone . ../other-issue3805
880 880 updating to branch default
881 881 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
882 882 $ echo "bar" >> foo
883 883 $ hg ci --amend
884 884 $ cd ../other-issue3805
885 885 $ hg log -G
886 886 @ 1:29f0c6921ddd (draft) [tip ] A
887 887 |
888 888 o 0:d20a80d4def3 (draft) [ ] base
889 889
890 890 $ hg log -G -R ../repo-issue3805
891 891 @ 3:323a9c3ddd91 (draft) [tip ] A
892 892 |
893 893 o 0:d20a80d4def3 (draft) [ ] base
894 894
895 895 $ hg incoming
896 896 comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
897 897 searching for changes
898 898 3:323a9c3ddd91 (draft) [tip ] A
899 899 $ hg incoming --bundle ../issue3805.hg
900 900 comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
901 901 searching for changes
902 902 3:323a9c3ddd91 (draft) [tip ] A
903 903 $ hg outgoing
904 904 comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
905 905 searching for changes
906 906 1:29f0c6921ddd (draft) [tip ] A
907 907
908 908 #if serve
909 909
910 910 $ hg serve -R ../repo-issue3805 -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
911 911 $ cat hg.pid >> $DAEMON_PIDS
912 912
913 913 $ hg incoming http://localhost:$HGPORT
914 914 comparing with http://localhost:$HGPORT/
915 915 searching for changes
916 916 2:323a9c3ddd91 (draft) [tip ] A
917 917 $ hg outgoing http://localhost:$HGPORT
918 918 comparing with http://localhost:$HGPORT/
919 919 searching for changes
920 920 1:29f0c6921ddd (draft) [tip ] A
921 921
922 922 $ killdaemons.py
923 923
924 924 #endif
925 925
926 926 This tests issue 3814
927 927
928 928 (nothing to push but locally hidden changeset)
929 929
930 930 $ cd ..
931 931 $ hg init repo-issue3814
932 932 $ cd repo-issue3805
933 933 $ hg push -r 323a9c3ddd91 ../repo-issue3814
934 934 pushing to ../repo-issue3814
935 935 searching for changes
936 936 adding changesets
937 937 adding manifests
938 938 adding file changes
939 939 added 2 changesets with 2 changes to 2 files
940 940 2 new obsolescence markers
941 941 $ hg out ../repo-issue3814
942 942 comparing with ../repo-issue3814
943 943 searching for changes
944 944 no changes found
945 945 [1]
946 946
947 947 Test that a local tag blocks a changeset from being hidden
948 948
949 949 $ hg tag -l visible -r 1 --hidden
950 950 $ hg log -G
951 951 @ 3:323a9c3ddd91 (draft) [tip ] A
952 952 |
953 953 | x 1:29f0c6921ddd (draft *obsolete*) [visible ] A
954 954 |/
955 955 o 0:d20a80d4def3 (draft) [ ] base
956 956
957 957 Test that removing a local tag does not cause some commands to fail
958 958
959 959 $ hg tag -l -r tip tiptag
960 960 $ hg tags
961 961 tiptag 3:323a9c3ddd91
962 962 tip 3:323a9c3ddd91
963 963 visible 1:29f0c6921ddd
964 964 $ hg --config extensions.strip= strip -r tip --no-backup
965 965 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
966 966 $ hg tags
967 967 visible 1:29f0c6921ddd
968 968 tip 1:29f0c6921ddd
969 969
970 970 Test bundle overlay onto hidden revision
971 971
972 972 $ cd ..
973 973 $ hg init repo-bundleoverlay
974 974 $ cd repo-bundleoverlay
975 975 $ echo "A" > foo
976 976 $ hg ci -Am "A"
977 977 adding foo
978 978 $ echo "B" >> foo
979 979 $ hg ci -m "B"
980 980 $ hg up 0
981 981 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
982 982 $ echo "C" >> foo
983 983 $ hg ci -m "C"
984 984 created new head
985 985 $ hg log -G
986 986 @ 2:c186d7714947 (draft) [tip ] C
987 987 |
988 988 | o 1:44526ebb0f98 (draft) [ ] B
989 989 |/
990 990 o 0:4b34ecfb0d56 (draft) [ ] A
991 991
992 992
993 993 $ hg clone -r1 . ../other-bundleoverlay
994 994 adding changesets
995 995 adding manifests
996 996 adding file changes
997 997 added 2 changesets with 2 changes to 1 files
998 998 updating to branch default
999 999 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1000 1000 $ cd ../other-bundleoverlay
1001 1001 $ echo "B+" >> foo
1002 1002 $ hg ci --amend -m "B+"
1003 1003 $ hg log -G --hidden
1004 1004 @ 3:b7d587542d40 (draft) [tip ] B+
1005 1005 |
1006 1006 | x 2:eb95e9297e18 (draft *obsolete*) [ ] temporary amend commit for 44526ebb0f98
1007 1007 | |
1008 1008 | x 1:44526ebb0f98 (draft *obsolete*) [ ] B
1009 1009 |/
1010 1010 o 0:4b34ecfb0d56 (draft) [ ] A
1011 1011
1012 1012
1013 1013 $ hg incoming ../repo-bundleoverlay --bundle ../bundleoverlay.hg
1014 1014 comparing with ../repo-bundleoverlay
1015 1015 searching for changes
1016 1016 1:44526ebb0f98 (draft) [ ] B
1017 1017 2:c186d7714947 (draft) [tip ] C
1018 1018 $ hg log -G -R ../bundleoverlay.hg
1019 1019 o 4:c186d7714947 (draft) [tip ] C
1020 1020 |
1021 1021 | @ 3:b7d587542d40 (draft) [ ] B+
1022 1022 |/
1023 1023 o 0:4b34ecfb0d56 (draft) [ ] A
1024 1024
1025 1025
1026 1026 #if serve
1027 1027
1028 1028 Test issue 4506
1029 1029
1030 1030 $ cd ..
1031 1031 $ hg init repo-issue4506
1032 1032 $ cd repo-issue4506
1033 1033 $ echo "0" > foo
1034 1034 $ hg add foo
1035 1035 $ hg ci -m "content-0"
1036 1036
1037 1037 $ hg up null
1038 1038 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1039 1039 $ echo "1" > bar
1040 1040 $ hg add bar
1041 1041 $ hg ci -m "content-1"
1042 1042 created new head
1043 1043 $ hg up 0
1044 1044 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
1045 1045 $ hg graft 1
1046 1046 grafting 1:1c9eddb02162 "content-1" (tip)
1047 1047
1048 1048 $ hg debugobsolete `hg log -r1 -T'{node}'` `hg log -r2 -T'{node}'`
1049 1049
1050 1050 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
1051 1051 $ cat hg.pid >> $DAEMON_PIDS
1052 1052
1053 1053 $ get-with-headers.py --headeronly localhost:$HGPORT 'rev/1'
1054 1054 404 Not Found
1055 1055 [1]
1056 1056 $ get-with-headers.py --headeronly localhost:$HGPORT 'file/tip/bar'
1057 1057 200 Script output follows
1058 1058 $ get-with-headers.py --headeronly localhost:$HGPORT 'annotate/tip/bar'
1059 1059 200 Script output follows
1060 1060
1061 1061 $ killdaemons.py
1062 1062
1063 1063 #endif
1064 1064
1065 1065 Test heads computation on pending index changes with obsolescence markers
1066 1066 $ cd ..
1067 1067 $ cat >$TESTTMP/test_extension.py << EOF
1068 1068 > from mercurial import cmdutil, registrar
1069 1069 > from mercurial.i18n import _
1070 1070 >
1071 1071 > cmdtable = {}
1072 1072 > command = registrar.command(cmdtable)
1073 1073 > @command("amendtransient",[], _('hg amendtransient [rev]'))
1074 1074 > def amend(ui, repo, *pats, **opts):
1075 1075 > def commitfunc(ui, repo, message, match, opts):
1076 1076 > return repo.commit(message, repo['.'].user(), repo['.'].date(), match)
1077 1077 > opts['message'] = 'Test'
1078 1078 > opts['logfile'] = None
1079 1079 > cmdutil.amend(ui, repo, commitfunc, repo['.'], {}, pats, opts)
1080 1080 > ui.write('%s\n' % repo.changelog.headrevs())
1081 1081 > EOF
1082 1082 $ cat >> $HGRCPATH << EOF
1083 1083 > [extensions]
1084 1084 > testextension=$TESTTMP/test_extension.py
1085 1085 > EOF
1086 1086 $ hg init repo-issue-nativerevs-pending-changes
1087 1087 $ cd repo-issue-nativerevs-pending-changes
1088 1088 $ mkcommit a
1089 1089 $ mkcommit b
1090 1090 $ hg up ".^"
1091 1091 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1092 1092 $ echo aa > a
1093 1093 $ hg amendtransient
1094 1094 [1, 3]
1095 1095
1096 1096 Check that corrupted hidden cache does not crash
1097 1097
1098 1098 $ printf "" > .hg/cache/hidden
1099 1099 $ hg log -r . -T '{node}' --debug
1100 1100 corrupted hidden cache
1101 1101 8fd96dfc63e51ed5a8af1bec18eb4b19dbf83812 (no-eol)
1102 1102 $ hg log -r . -T '{node}' --debug
1103 1103 8fd96dfc63e51ed5a8af1bec18eb4b19dbf83812 (no-eol)
1104 1104
1105 1105 #if unix-permissions
1106 1106 Check that wrong hidden cache permission does not crash
1107 1107
1108 1108 $ chmod 000 .hg/cache/hidden
1109 1109 $ hg log -r . -T '{node}' --debug
1110 1110 cannot read hidden cache
1111 1111 error writing hidden changesets cache
1112 1112 8fd96dfc63e51ed5a8af1bec18eb4b19dbf83812 (no-eol)
1113 1113 #endif
1114 1114
1115 1115 Test cache consistency for the visible filter
1116 1116 1) We want to make sure that the cached filtered revs are invalidated when
1117 1117 bookmarks change
1118 1118 $ cd ..
1119 1119 $ cat >$TESTTMP/test_extension.py << EOF
1120 1120 > import weakref
1121 1121 > from mercurial import cmdutil, extensions, bookmarks, repoview
1122 1122 > def _bookmarkchanged(orig, bkmstoreinst, *args, **kwargs):
1123 1123 > reporef = weakref.ref(bkmstoreinst._repo)
1124 1124 > def trhook(tr):
1125 1125 > repo = reporef()
1126 1126 > hidden1 = repoview.computehidden(repo)
1127 1127 > hidden = repoview.filterrevs(repo, 'visible')
1128 1128 > if sorted(hidden1) != sorted(hidden):
1129 1129 > print "cache inconsistency"
1130 1130 > bkmstoreinst._repo.currenttransaction().addpostclose('test_extension', trhook)
1131 1131 > orig(bkmstoreinst, *args, **kwargs)
1132 1132 > def extsetup(ui):
1133 1133 > extensions.wrapfunction(bookmarks.bmstore, 'recordchange',
1134 1134 > _bookmarkchanged)
1135 1135 > EOF
1136 1136
1137 1137 $ hg init repo-cache-inconsistency
1138 1138 $ cd repo-issue-nativerevs-pending-changes
1139 1139 $ mkcommit a
1140 1140 a already tracked!
1141 1141 $ mkcommit b
1142 1142 $ hg id
1143 1143 13bedc178fce tip
1144 1144 $ echo "hello" > b
1145 1145 $ hg commit --amend -m "message"
1146 1146 $ hg book bookb -r 13bedc178fce --hidden
1147 1147 $ hg log -r 13bedc178fce
1148 1148 5:13bedc178fce (draft *obsolete*) [ bookb] add b
1149 1149 $ hg book -d bookb
1150 1150 $ hg log -r 13bedc178fce
1151 1151 abort: hidden revision '13bedc178fce'!
1152 1152 (use --hidden to access hidden revisions)
1153 1153 [255]
1154 1154
1155 1155 Empty out the test extension, as it isn't compatible with later parts
1156 1156 of the test.
1157 1157 $ echo > $TESTTMP/test_extension.py
1158 1158
1159 1159 Test ability to pull changeset with locally applying obsolescence markers
1160 1160 (issue4945)
1161 1161
1162 1162 $ cd ..
1163 1163 $ hg init issue4845
1164 1164 $ cd issue4845
1165 1165
1166 1166 $ echo foo > f0
1167 1167 $ hg add f0
1168 1168 $ hg ci -m '0'
1169 1169 $ echo foo > f1
1170 1170 $ hg add f1
1171 1171 $ hg ci -m '1'
1172 1172 $ echo foo > f2
1173 1173 $ hg add f2
1174 1174 $ hg ci -m '2'
1175 1175
1176 1176 $ echo bar > f2
1177 1177 $ hg commit --amend --config experimetnal.evolution=createmarkers
1178 1178 $ hg log -G
1179 1179 @ 4:b0551702f918 (draft) [tip ] 2
1180 1180 |
1181 1181 o 1:e016b03fd86f (draft) [ ] 1
1182 1182 |
1183 1183 o 0:a78f55e5508c (draft) [ ] 0
1184 1184
1185 1185 $ hg log -G --hidden
1186 1186 @ 4:b0551702f918 (draft) [tip ] 2
1187 1187 |
1188 1188 | x 3:f27abbcc1f77 (draft *obsolete*) [ ] temporary amend commit for e008cf283490
1189 1189 | |
1190 1190 | x 2:e008cf283490 (draft *obsolete*) [ ] 2
1191 1191 |/
1192 1192 o 1:e016b03fd86f (draft) [ ] 1
1193 1193 |
1194 1194 o 0:a78f55e5508c (draft) [ ] 0
1195 1195
1196 1196
1197 1197 $ hg strip -r 1 --config extensions.strip=
1198 1198 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1199 1199 saved backup bundle to $TESTTMP/tmpe/issue4845/.hg/strip-backup/e016b03fd86f-c41c6bcc-backup.hg (glob)
1200 1200 $ hg log -G
1201 1201 @ 0:a78f55e5508c (draft) [tip ] 0
1202 1202
1203 1203 $ hg log -G --hidden
1204 1204 @ 0:a78f55e5508c (draft) [tip ] 0
1205 1205
1206 1206
1207 1207 $ hg pull .hg/strip-backup/*
1208 1208 pulling from .hg/strip-backup/e016b03fd86f-c41c6bcc-backup.hg
1209 1209 searching for changes
1210 1210 adding changesets
1211 1211 adding manifests
1212 1212 adding file changes
1213 1213 added 2 changesets with 2 changes to 2 files
1214 1214 (run 'hg update' to get a working copy)
1215 1215 $ hg log -G
1216 1216 o 2:b0551702f918 (draft) [tip ] 2
1217 1217 |
1218 1218 o 1:e016b03fd86f (draft) [ ] 1
1219 1219 |
1220 1220 @ 0:a78f55e5508c (draft) [ ] 0
1221 1221
1222 1222 $ hg log -G --hidden
1223 1223 o 2:b0551702f918 (draft) [tip ] 2
1224 1224 |
1225 1225 o 1:e016b03fd86f (draft) [ ] 1
1226 1226 |
1227 1227 @ 0:a78f55e5508c (draft) [ ] 0
1228 1228
1229 1229 Test that 'hg debugobsolete --index --rev' can show indices of obsmarkers when
1230 1230 only a subset of those are displayed (because of --rev option)
1231 1231 $ hg init doindexrev
1232 1232 $ cd doindexrev
1233 1233 $ echo a > a
1234 1234 $ hg ci -Am a
1235 1235 adding a
1236 1236 $ hg ci --amend -m aa
1237 1237 $ echo b > b
1238 1238 $ hg ci -Am b
1239 1239 adding b
1240 1240 $ hg ci --amend -m bb
1241 1241 $ echo c > c
1242 1242 $ hg ci -Am c
1243 1243 adding c
1244 1244 $ hg ci --amend -m cc
1245 1245 $ echo d > d
1246 1246 $ hg ci -Am d
1247 1247 adding d
1248 1248 $ hg ci --amend -m dd --config experimental.evolution.track-operation=1
1249 1249 $ hg debugobsolete --index --rev "3+7"
1250 1250 1 6fdef60fcbabbd3d50e9b9cbc2a240724b91a5e1 d27fb9b066076fd921277a4b9e8b9cb48c95bc6a 0 \(.*\) {'user': 'test'} (re)
1251 1251 3 4715cf767440ed891755448016c2b8cf70760c30 7ae79c5d60f049c7b0dd02f5f25b9d60aaf7b36d 0 \(.*\) {'operation': 'amend', 'user': 'test'} (re)
1252 1252 $ hg debugobsolete --index --rev "3+7" -Tjson
1253 1253 [
1254 1254 {
1255 "date": *, (glob)
1255 "date": [0.0, 0],
1256 1256 "flag": 0,
1257 1257 "index": 1,
1258 1258 "metadata": {"user": "test"},
1259 1259 "precnode": "6fdef60fcbabbd3d50e9b9cbc2a240724b91a5e1",
1260 1260 "succnodes": ["d27fb9b066076fd921277a4b9e8b9cb48c95bc6a"]
1261 1261 },
1262 1262 {
1263 "date": *, (glob)
1263 "date": [0.0, 0],
1264 1264 "flag": 0,
1265 1265 "index": 3,
1266 1266 "metadata": {"operation": "amend", "user": "test"},
1267 1267 "precnode": "4715cf767440ed891755448016c2b8cf70760c30",
1268 1268 "succnodes": ["7ae79c5d60f049c7b0dd02f5f25b9d60aaf7b36d"]
1269 1269 }
1270 1270 ]
1271 1271
1272 1272 Test the --delete option of debugobsolete command
1273 1273 $ hg debugobsolete --index
1274 0 cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b f9bd49731b0b175e42992a3c8fa6c678b2bc11f1 0 \(.*\) {'user': 'test'} (re)
1275 1 6fdef60fcbabbd3d50e9b9cbc2a240724b91a5e1 d27fb9b066076fd921277a4b9e8b9cb48c95bc6a 0 \(.*\) {'user': 'test'} (re)
1276 2 1ab51af8f9b41ef8c7f6f3312d4706d870b1fb74 29346082e4a9e27042b62d2da0e2de211c027621 0 \(.*\) {'user': 'test'} (re)
1277 3 4715cf767440ed891755448016c2b8cf70760c30 7ae79c5d60f049c7b0dd02f5f25b9d60aaf7b36d 0 (*) {'operation': 'amend', 'user': 'test'} (glob)
1274 0 cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b f9bd49731b0b175e42992a3c8fa6c678b2bc11f1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
1275 1 6fdef60fcbabbd3d50e9b9cbc2a240724b91a5e1 d27fb9b066076fd921277a4b9e8b9cb48c95bc6a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
1276 2 1ab51af8f9b41ef8c7f6f3312d4706d870b1fb74 29346082e4a9e27042b62d2da0e2de211c027621 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
1277 3 4715cf767440ed891755448016c2b8cf70760c30 7ae79c5d60f049c7b0dd02f5f25b9d60aaf7b36d 0 (Thu Jan 01 00:00:00 1970 +0000) {'operation': 'amend', 'user': 'test'}
1278 1278 $ hg debugobsolete --delete 1 --delete 3
1279 1279 deleted 2 obsolescence markers
1280 1280 $ hg debugobsolete
1281 cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b f9bd49731b0b175e42992a3c8fa6c678b2bc11f1 0 \(.*\) {'user': 'test'} (re)
1282 1ab51af8f9b41ef8c7f6f3312d4706d870b1fb74 29346082e4a9e27042b62d2da0e2de211c027621 0 \(.*\) {'user': 'test'} (re)
1281 cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b f9bd49731b0b175e42992a3c8fa6c678b2bc11f1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
1282 1ab51af8f9b41ef8c7f6f3312d4706d870b1fb74 29346082e4a9e27042b62d2da0e2de211c027621 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
1283 1283
1284 1284 Test adding changeset after obsmarkers affecting it
1285 1285 (eg: during pull, or unbundle)
1286 1286
1287 1287 $ mkcommit e
1288 1288 $ hg bundle -r . --base .~1 ../bundle-2.hg
1289 1289 1 changesets found
1290 1290 $ getid .
1291 1291 $ hg --config extensions.strip= strip -r .
1292 1292 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1293 1293 saved backup bundle to $TESTTMP/tmpe/issue4845/doindexrev/.hg/strip-backup/9bc153528424-ee80edd4-backup.hg (glob)
1294 1294 $ hg debugobsolete 9bc153528424ea266d13e57f9ff0d799dfe61e4b
1295 1295 $ hg unbundle ../bundle-2.hg
1296 1296 adding changesets
1297 1297 adding manifests
1298 1298 adding file changes
1299 1299 added 1 changesets with 1 changes to 1 files
1300 1300 (run 'hg update' to get a working copy)
1301 1301 $ hg log -G
1302 1302 @ 7:7ae79c5d60f0 (draft) [tip ] dd
1303 1303 |
1304 1304 | o 6:4715cf767440 (draft) [ ] d
1305 1305 |/
1306 1306 o 5:29346082e4a9 (draft) [ ] cc
1307 1307 |
1308 1308 o 3:d27fb9b06607 (draft) [ ] bb
1309 1309 |
1310 1310 | o 2:6fdef60fcbab (draft) [ ] b
1311 1311 |/
1312 1312 o 1:f9bd49731b0b (draft) [ ] aa
1313 1313
1314 1314
1315 1315 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now