##// END OF EJS Templates
repair: use ProgrammingError
Jun Wu -
r31645:7095e783 default
parent child Browse files
Show More
@@ -1,1097 +1,1096 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import stat
14 14 import tempfile
15 15
16 16 from .i18n import _
17 17 from .node import short
18 18 from . import (
19 19 bundle2,
20 20 changegroup,
21 21 changelog,
22 22 error,
23 23 exchange,
24 24 manifest,
25 25 obsolete,
26 26 revlog,
27 27 scmutil,
28 28 util,
29 29 vfs as vfsmod,
30 30 )
31 31
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    version = changegroup.safeversion(repo)
    changes = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                            version=version)

    vfs = repo.vfs
    backupdir = "strip-backup"
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    hexes = sorted(ctx.hex() for ctx in repo.set('%ln::%ln', bases, heads))
    digest = hashlib.sha1(''.join(hexes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), digest[:8], suffix)

    # Pick the bundle container and compression engine matching the
    # changegroup version in use.
    compression = None
    if version != '01':
        bundletype = "HG20"
        if compress:
            compression = 'BZ'
    else:
        bundletype = "HG10BZ" if compress else "HG10UN"
    return bundle2.writebundle(repo.ui, changes, name, bundletype, vfs,
                               compression=compression)
60 60
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    touched = set()
    # Every file mentioned by any changeset at or above the strip point
    # has a filelog that will be truncated.
    for rev in xrange(striprev, len(repo)):
        touched.update(repo[rev].files())
    return sorted(touched)
69 69
70 70 def _collectbrokencsets(repo, files, striprev):
71 71 """return the changesets which will be broken by the truncation"""
72 72 s = set()
73 73 def collectone(revlog):
74 74 _, brokenset = revlog.getstrippoint(striprev)
75 75 s.update([revlog.linkrev(r) for r in brokenset])
76 76
77 77 collectone(repo.manifestlog._revlog)
78 78 for fname in files:
79 79 collectone(repo.file(fname))
80 80
81 81 return s
82 82
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Remove the changesets in 'nodelist' and their descendants.

    'nodelist' may be a single node or a list of nodes.  When 'backup' is
    true, a bundle of everything stripped is written to .hg/strip-backup
    and its path is returned (None otherwise).  Revisions above the strip
    point that are *not* being stripped are saved to a temporary bundle
    and re-applied after the revlogs are truncated.
    """
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    # Revlogs can only be truncated from the end, so everything from the
    # lowest requested revision upward is affected.
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # Bookmarks pointing at stripped revisions will be moved to
    # 'newbmtarget' after the strip succeeds.
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        raise error.ProgrammingError('cannot strip from inside a transaction')

    try:
        with repo.transaction("strip") as tr:
            # Entries appended to the transaction from here on are the
            # revlog files we truncate below.
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                    # otherwise
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
                        repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # Truncate each touched revlog file to the offset recorded in
            # the transaction; files truncated to zero are gone entirely.
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        # Re-apply the saved revisions that were not meant to be stripped.
        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()

        with repo.lock():
            with repo.transaction('repair') as tr:
                bm.recordchange(tr)

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
250 249
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        # The membership test forces the lazy fncache object to read its
        # on-disk contents before we snapshot fnc.entries below.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        # Walk every changeset and record the .i/.d store files for each
        # file it touches that still exists in the store.
        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            # Tree manifests store one manifest revlog per directory.
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            # The write is transactional so a failure leaves the old
            # fncache intact.
            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
322 321
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    # Everything reachable from the bookmark, minus anything reachable
    # from another head or another bookmark.
    spec = ("ancestors(bookmark(%s)) - "
            "ancestors(head() and not bookmark(%s)) - "
            "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(spec, mark, mark, mark)
334 333
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in indices:
            n += 1
            continue
        left.append(m)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    # 'data' was previously named 'bytes', shadowing the builtin.
    for data in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return n
363 362
def upgraderequiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # Both requirements were introduced in Mercurial 0.9.2.
    return set(['revlogv1', 'store'])
376 375
def upgradeblocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blockers = {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        'manifestv2',
        'treemanifest',
        # This was a precursor to generaldelta and was never enabled by
        # default. It should (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
    }
    return blockers
394 393
def upgradesupportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    # No requirement may currently be dropped by an upgrade.
    return set()
403 402
def upgradesupporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
    }
    return supported
419 418
def upgradeallowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    allowed = {
        'dotencode',
        'fncache',
        'generaldelta',
    }
    return allowed
435 434
# Categories for upgradeimprovement.type.  Note the identifier
# 'optimisation' (British spelling) holds the value 'optimization'
# (American spelling); compare against the constants, not literals.
deficiency = 'deficiency'
optimisation = 'optimization'
438 437
class upgradeimprovement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.

    fromdefault (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from Mercurial's default configuration.

    fromconfig (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from the current Mercurial configuration.
    """
    def __init__(self, name, type, description, upgrademessage, **kwargs):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

        # Extra keyword arguments (e.g. fromdefault/fromconfig) become
        # attributes verbatim.
        for key in kwargs:
            setattr(self, key, kwargs[key])
479 478
def upgradefindimprovements(repo):
    """Determine improvements that can be made to the repo during upgrade.

    Returns a list of ``upgradeimprovement`` describing repository deficiencies
    and optimizations.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    # Requirements a freshly-created repo would get under the current
    # config; used to distinguish fromdefault vs fromconfig deviations.
    newreporeqs = localrepo.newreporequirements(repo)

    improvements = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    if 'fncache' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='fncache',
            type=deficiency,
            description=_('long and reserved filenames may not work correctly; '
                          'repository performance is sub-optimal'),
            upgrademessage=_('repository will be more resilient to storing '
                             'certain paths and performance of certain '
                             'operations should be improved'),
            fromdefault=True,
            fromconfig='fncache' in newreporeqs))

    if 'dotencode' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='dotencode',
            type=deficiency,
            description=_('storage of filenames beginning with a period or '
                          'space may not work correctly'),
            upgrademessage=_('repository will be better able to store files '
                             'beginning with a space or period'),
            fromdefault=True,
            fromconfig='dotencode' in newreporeqs))

    if 'generaldelta' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='generaldelta',
            type=deficiency,
            description=_('deltas within internal storage are unable to '
                          'choose optimal revisions; repository is larger and '
                          'slower than it could be; interaction with other '
                          'repositories may require extra network and CPU '
                          'resources, making "hg push" and "hg pull" slower'),
            upgrademessage=_('repository storage will be able to create '
                             'optimal deltas; new repository data will be '
                             'smaller and read times should decrease; '
                             'interacting with other repositories using this '
                             'storage model should require less network and '
                             'CPU resources, making "hg push" and "hg pull" '
                             'faster'),
            fromdefault=True,
            fromconfig='generaldelta' in newreporeqs))

    # Mercurial 4.0 changed changelogs to not use delta chains. Search for
    # changelogs with deltas.
    cl = repo.changelog
    for rev in cl:
        chainbase = cl.chainbase(rev)
        if chainbase != rev:
            # One delta-chained changelog revision is enough to flag the
            # deficiency; stop scanning.
            improvements.append(upgradeimprovement(
                name='removecldeltachain',
                type=deficiency,
                description=_('changelog storage is using deltas instead of '
                              'raw entries; changelog reading and any '
                              'operation relying on changelog data are slower '
                              'than they could be'),
                upgrademessage=_('changelog storage will be reformated to '
                                 'store raw entries; changelog reading will be '
                                 'faster; changelog size may be reduced'),
                fromdefault=True,
                fromconfig=True))
            break

    # Now for the optimizations.

    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.

    improvements.append(upgradeimprovement(
        name='redeltaparent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    improvements.append(upgradeimprovement(
        name='redeltamultibase',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revision and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the amount '
                      'of files in the repository; this slow down should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))

    improvements.append(upgradeimprovement(
        name='redeltaall',
        type=optimisation,
        description=_('deltas within internal storage will always be '
                      'recalculated without reusing prior deltas; this will '
                      'likely make execution run several times slower; this '
                      'optimization is typically not needed'),
        upgrademessage=_('deltas within internal storage will be fully '
                         'recomputed; this will likely drastically slow down '
                         'execution time')))

    return improvements
606 605
def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
                            optimize):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``upgradefindimprovements``,
    determine the list of upgrade actions that will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    knownreqs = upgradesupporteddestrequirements(repo)
    actions = []

    for improvement in improvements:
        name = improvement.name

        # If the action is a requirement that doesn't show up in the
        # destination requirements, prune the action.
        if name in knownreqs and name not in destreqs:
            continue

        # Only deficiencies are applied automatically; optimizations are
        # opt-in through the 'optimize' argument below.
        if improvement.type == deficiency:
            actions.append(name)

    for opt in sorted(optimize):
        if opt not in actions:
            actions.append(opt)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return actions
641 640
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith('00manifest.i'):
        mandir = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, dir=mandir)
    # Filelogs don't do anything special with settings. So we can use a
    # vanilla revlog.
    return revlog.revlog(repo.svfs, path)
656 655
def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
    """Copy revlogs between 2 repos."""
    # Aggregate counters.  The f/m/c prefixes track filelog, manifest and
    # changelog figures separately for the per-category progress output;
    # src/dst/raw distinguish stored size, migrated size and tracked data.
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in srcrepo.store.walk():
        # .d data files are handled implicitly with their .i index files.
        if unencoded.endswith('.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)
        revcount += len(rl)

        datasize = 0
        rawsize = 0
        idx = rl.index
        for rev in rl:
            e = idx[rev]
            datasize += e[1]
            rawsize += e[2]

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, revlog.revlog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize

    if not revcount:
        return

    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
               '%d in changelog)\n') %
             (revcount, frevcount, mrevcount, crevcount))
    ui.write(_('migrating %s in store; %s tracked data\n') % (
             (util.bytecount(srcsize), util.bytecount(srcrawsize))))

    # Used to keep track of progress.
    # 'progress' is (topic, count, total); it is (re)initialized via
    # progress[:] before each category below, so indexing in the callback
    # is safe by the time it runs.
    progress = []
    def oncopiedrevision(rl, rev, node):
        progress[1] += 1
        srcrepo.ui.progress(progress[0], progress[1], total=progress[2])

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    # NOTE(review): the category-transition messages assume store.walk()
    # yields filelogs, then manifests, then the changelog — confirm against
    # the store implementation.
    seen = set()
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)
        newrl = _revlogfrompath(dstrepo, unencoded)

        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
            ui.write(_('finished migrating %d manifest revisions across %d '
                       'manifests; change in size: %s\n') %
                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))

            ui.write(_('migrating changelog containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (crevcount, util.bytecount(csrcsize),
                      util.bytecount(crawsize)))
            seen.add('c')
            progress[:] = [_('changelog revisions'), 0, crevcount]
        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
            ui.write(_('finished migrating %d filelog revisions across %d '
                       'filelogs; change in size: %s\n') %
                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))

            ui.write(_('migrating %d manifests containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (mcount, mrevcount, util.bytecount(msrcsize),
                      util.bytecount(mrawsize)))
            seen.add('m')
            progress[:] = [_('manifest revisions'), 0, mrevcount]
        elif 'f' not in seen:
            ui.write(_('migrating %d filelogs containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (fcount, frevcount, util.bytecount(fsrcsize),
                      util.bytecount(frawsize)))
            seen.add('f')
            progress[:] = [_('file revisions'), 0, frevcount]

        ui.progress(progress[0], progress[1], total=progress[2])

        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
                    deltareuse=deltareuse,
                    aggressivemergedeltas=aggressivemergedeltas)

        # Measure the size of the migrated revlog from its new index.
        datasize = 0
        idx = newrl.index
        for rev in newrl:
            datasize += idx[rev][1]

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    ui.progress(progress[0], None)

    ui.write(_('finished migrating %d changelog revisions; change in size: '
               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))

    ui.write(_('finished migrating %d total revisions; total change in store '
               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
797 796
798 797 def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
799 798 """Determine whether to copy a store file during upgrade.
800 799
801 800 This function is called when migrating store files from ``srcrepo`` to
802 801 ``dstrepo`` as part of upgrading a repository.
803 802
804 803 Args:
805 804 srcrepo: repo we are copying from
806 805 dstrepo: repo we are copying to
807 806 requirements: set of requirements for ``dstrepo``
808 807 path: store file being examined
809 808 mode: the ``ST_MODE`` file type of ``path``
810 809 st: ``stat`` data structure for ``path``
811 810
812 811 Function should return ``True`` if the file is to be copied.
813 812 """
814 813 # Skip revlogs.
815 814 if path.endswith(('.i', '.d')):
816 815 return False
817 816 # Skip transaction related files.
818 817 if path.startswith('undo'):
819 818 return False
820 819 # Only copy regular files.
821 820 if mode != stat.S_IFREG:
822 821 return False
823 822 # Skip other skipped files.
824 823 if path in ('lock', 'fncache'):
825 824 return False
826 825
827 826 return True
828 827
def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.

    The default implementation does nothing.
    """
835 834
def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    Returns the path of the backup directory holding the pre-upgrade store
    and requires file. Callers must hold both the wlock and lock on *both*
    repositories (asserted below).
    """
    # Both repos must be write-locked by us: we are about to rewrite their
    # stores and swap directories underneath them.
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.write(_('(it is safe to interrupt this process any time before '
               'data migration completes)\n'))

    # Map the requested optimization actions onto a revlog delta reuse
    # policy for the copy. NOTE(review): 'redeltamultibase' maps to the same
    # policy as 'redeltaparent'; the multibase behavior is carried separately
    # via the boolean passed to _copyrevlogs below.
    if 'redeltaall' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif 'redeltaparent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 'redeltamultibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

    with dstrepo.transaction('upgrade') as tr:
        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
                     'redeltamultibase' in actions)

    # Now copy other files in the store directory.
    for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
        if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
                                       p, kind, st):
            continue

        srcrepo.ui.write(_('copying %s\n') % p)
        src = srcrepo.store.vfs.join(p)
        dst = dstrepo.store.vfs.join(p)
        util.copyfile(src, dst, copystat=True)

    _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.write(_('data fully migrated to temporary repository\n'))

    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | set(['upgradeinprogress']))

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath
923 922
def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place.

    With run=False (the default) this is a dry run: it only prints the
    requirement changes, deficiencies and optimizations that an upgrade
    would perform. With run=True the migration is actually executed under
    wlock+lock via _upgraderepo().

    optimize: optional iterable of optimization action names to enable in
    addition to the automatically determined actions.

    Raises error.Abort when the repository's requirements make an upgrade
    impossible or an unknown optimization is requested.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    optimize = set(optimize or [])
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo)

    # Requirements the upgrade would drop but which we don't know how to
    # drop safely.
    noremovereqs = (repo.requirements - newreqs -
                    upgradesupportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    # Requirements the upgrade would add but which we don't support adding.
    noaddreqs = (newreqs - repo.requirements -
                 upgradeallowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

    # Find and validate all improvements that can be made.
    improvements = upgradefindimprovements(repo)
    for i in improvements:
        if i.type not in (deficiency, optimisation):
            raise error.Abort(_('unexpected improvement type %s for %s') % (
                i.type, i.name))

    # Validate arguments.
    unknownoptimize = optimize - set(i.name for i in improvements
                                     if i.type == optimisation)
    if unknownoptimize:
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(unknownoptimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    actions = upgradedetermineactions(repo, improvements, repo.requirements,
                                      newreqs, optimize)

    def printrequirements():
        # Summarize how repo.requirements will change relative to newreqs.
        ui.write(_('requirements\n'))
        ui.write(_('   preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_('   removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_('   added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        # Print each selected action with its upgrade message.
        for action in actions:
            for i in improvements:
                if i.name == action:
                    ui.write('%s\n   %s\n\n' %
                             (i.name, i.upgrademessage))

    if not run:
        # Dry-run: classify deficiencies by where the recommendation comes
        # from (current config vs default config) and report.
        fromdefault = []
        fromconfig = []
        optimizations = []

        for i in improvements:
            assert i.type in (deficiency, optimisation)
            if i.type == deficiency:
                if i.fromdefault:
                    fromdefault.append(i)
                if i.fromconfig:
                    fromconfig.append(i)
            else:
                optimizations.append(i)

        if fromdefault or fromconfig:
            fromconfignames = set(x.name for x in fromconfig)
            onlydefault = [i for i in fromdefault
                           if i.name not in fromconfignames]

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in improvements
                          if i.name not in actions and i.type == optimisation]
        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock():
        with repo.lock():
            ui.write(_('repository locked and read-only\n'))
            # Our strategy for upgrading the repository is to create a new,
            # temporary repository, write data to it, then do a swap of the
            # data. There are less heavyweight ways to do this, but it is easier
            # to create a new repo object than to instantiate all the components
            # (like the store) separately.
            tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
            backuppath = None
            try:
                ui.write(_('creating temporary repository to stage migrated '
                           'data: %s\n') % tmppath)
                dstrepo = localrepo.localrepository(repo.baseui,
                                                    path=tmppath,
                                                    create=True)

                with dstrepo.wlock():
                    with dstrepo.lock():
                        backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                                  actions)

            finally:
                ui.write(_('removing temporary repository %s\n') % tmppath)
                repo.vfs.rmtree(tmppath, forcibly=True)

                # The backup is kept even on success; the user removes it
                # manually after verifying the upgraded repository.
                if backuppath:
                    ui.warn(_('copy of old repository backed up at %s\n') %
                            backuppath)
                    ui.warn(_('the old repository will not be deleted; remove '
                              'it to free up disk space once the upgraded '
                              'repository is verified\n'))
@@ -1,179 +1,178 b''
1 1
2 2 $ cat << EOF > buggylocking.py
3 3 > """A small extension that tests our developer warnings
4 4 > """
5 5 >
6 6 > from mercurial import cmdutil, repair, revset
7 7 >
8 8 > cmdtable = {}
9 9 > command = cmdutil.command(cmdtable)
10 10 >
11 11 > @command('buggylocking', [], '')
12 12 > def buggylocking(ui, repo):
13 13 > lo = repo.lock()
14 14 > wl = repo.wlock()
15 15 > wl.release()
16 16 > lo.release()
17 17 >
18 18 > @command('buggytransaction', [], '')
19 19 > def buggylocking(ui, repo):
20 20 > tr = repo.transaction('buggy')
 21 21 > # make sure we rollback the transaction as we don't want to rely on the __del__
22 22 > tr.release()
23 23 >
24 24 > @command('properlocking', [], '')
25 25 > def properlocking(ui, repo):
26 26 > """check that reentrance is fine"""
27 27 > wl = repo.wlock()
28 28 > lo = repo.lock()
29 29 > tr = repo.transaction('proper')
30 30 > tr2 = repo.transaction('proper')
31 31 > lo2 = repo.lock()
32 32 > wl2 = repo.wlock()
33 33 > wl2.release()
34 34 > lo2.release()
35 35 > tr2.close()
36 36 > tr.close()
37 37 > lo.release()
38 38 > wl.release()
39 39 >
40 40 > @command('nowaitlocking', [], '')
41 41 > def nowaitlocking(ui, repo):
42 42 > lo = repo.lock()
43 43 > wl = repo.wlock(wait=False)
44 44 > wl.release()
45 45 > lo.release()
46 46 >
47 47 > @command('stripintr', [], '')
48 48 > def stripintr(ui, repo):
49 49 > lo = repo.lock()
50 50 > tr = repo.transaction('foobar')
51 51 > try:
52 52 > repair.strip(repo.ui, repo, [repo['.'].node()])
53 53 > finally:
54 54 > lo.release()
55 55 > @command('oldanddeprecated', [], '')
56 56 > def oldanddeprecated(ui, repo):
57 57 > """test deprecation warning API"""
58 58 > def foobar(ui):
59 59 > ui.deprecwarn('foorbar is deprecated, go shopping', '42.1337')
60 60 > foobar(ui)
61 61 >
62 62 > def oldstylerevset(repo, subset, x):
63 63 > return list(subset)
64 64 >
65 65 > revset.symbols['oldstyle'] = oldstylerevset
66 66 > EOF
67 67
68 68 $ cat << EOF >> $HGRCPATH
69 69 > [extensions]
70 70 > buggylocking=$TESTTMP/buggylocking.py
71 71 > mock=$TESTDIR/mockblackbox.py
72 72 > blackbox=
73 73 > [devel]
74 74 > all-warnings=1
75 75 > EOF
76 76
77 77 $ hg init lock-checker
78 78 $ cd lock-checker
79 79 $ hg buggylocking
80 80 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
81 81 $ cat << EOF >> $HGRCPATH
82 82 > [devel]
83 83 > all=0
84 84 > check-locks=1
85 85 > EOF
86 86 $ hg buggylocking
87 87 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
88 88 $ hg buggylocking --traceback
89 89 devel-warn: "wlock" acquired after "lock" at:
90 90 */hg:* in * (glob)
91 91 */mercurial/dispatch.py:* in run (glob)
92 92 */mercurial/dispatch.py:* in dispatch (glob)
93 93 */mercurial/dispatch.py:* in _runcatch (glob)
94 94 */mercurial/dispatch.py:* in callcatch (glob)
95 95 */mercurial/scmutil.py* in callcatch (glob)
96 96 */mercurial/dispatch.py:* in _runcatchfunc (glob)
97 97 */mercurial/dispatch.py:* in _dispatch (glob)
98 98 */mercurial/dispatch.py:* in runcommand (glob)
99 99 */mercurial/dispatch.py:* in _runcommand (glob)
100 100 */mercurial/dispatch.py:* in <lambda> (glob)
101 101 */mercurial/util.py:* in check (glob)
102 102 $TESTTMP/buggylocking.py:* in buggylocking (glob)
103 103 $ hg properlocking
104 104 $ hg nowaitlocking
105 105
106 106 $ echo a > a
107 107 $ hg add a
108 108 $ hg commit -m a
109 $ hg stripintr
109 $ hg stripintr 2>&1 | egrep -v '^(\*\*| )'
110 110 saved backup bundle to $TESTTMP/lock-checker/.hg/strip-backup/*-backup.hg (glob)
111 abort: programming error: cannot strip from inside a transaction
112 (contact your extension maintainer)
113 [255]
111 Traceback (most recent call last):
112 mercurial.error.ProgrammingError: cannot strip from inside a transaction
114 113
115 114 $ hg log -r "oldstyle()" -T '{rev}\n'
116 115 devel-warn: revset "oldstyle" uses list instead of smartset
117 116 (compatibility will be dropped after Mercurial-3.9, update your code.) at: *mercurial/revset.py:* (mfunc) (glob)
118 117 0
119 118 $ hg oldanddeprecated
120 119 devel-warn: foorbar is deprecated, go shopping
121 120 (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
122 121
123 122 $ hg oldanddeprecated --traceback
124 123 devel-warn: foorbar is deprecated, go shopping
125 124 (compatibility will be dropped after Mercurial-42.1337, update your code.) at:
126 125 */hg:* in <module> (glob)
127 126 */mercurial/dispatch.py:* in run (glob)
128 127 */mercurial/dispatch.py:* in dispatch (glob)
129 128 */mercurial/dispatch.py:* in _runcatch (glob)
130 129 */mercurial/dispatch.py:* in callcatch (glob)
131 130 */mercurial/scmutil.py* in callcatch (glob)
132 131 */mercurial/dispatch.py:* in _runcatchfunc (glob)
133 132 */mercurial/dispatch.py:* in _dispatch (glob)
134 133 */mercurial/dispatch.py:* in runcommand (glob)
135 134 */mercurial/dispatch.py:* in _runcommand (glob)
136 135 */mercurial/dispatch.py:* in <lambda> (glob)
137 136 */mercurial/util.py:* in check (glob)
138 137 $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
139 138 $ hg blackbox -l 9
140 139 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: revset "oldstyle" uses list instead of smartset
141 140 (compatibility will be dropped after Mercurial-3.9, update your code.) at: *mercurial/revset.py:* (mfunc) (glob)
142 141 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> log -r 'oldstyle()' -T '{rev}\n' exited 0 after * seconds (glob)
143 142 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated
144 143 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
145 144 (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
146 145 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated exited 0 after * seconds (glob)
147 146 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback
148 147 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
149 148 (compatibility will be dropped after Mercurial-42.1337, update your code.) at:
150 149 */hg:* in <module> (glob)
151 150 */mercurial/dispatch.py:* in run (glob)
152 151 */mercurial/dispatch.py:* in dispatch (glob)
153 152 */mercurial/dispatch.py:* in _runcatch (glob)
154 153 */mercurial/dispatch.py:* in callcatch (glob)
155 154 */mercurial/scmutil.py* in callcatch (glob)
156 155 */mercurial/dispatch.py:* in _runcatchfunc (glob)
157 156 */mercurial/dispatch.py:* in _dispatch (glob)
158 157 */mercurial/dispatch.py:* in runcommand (glob)
159 158 */mercurial/dispatch.py:* in _runcommand (glob)
160 159 */mercurial/dispatch.py:* in <lambda> (glob)
161 160 */mercurial/util.py:* in check (glob)
162 161 $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
163 162 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob)
164 163 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 9
165 164
166 165 Test programming error failure:
167 166
168 167 $ hg buggytransaction 2>&1 | egrep -v '^ '
169 168 ** Unknown exception encountered with possibly-broken third-party extension buggylocking
170 169 ** which supports versions unknown of Mercurial.
171 170 ** Please disable buggylocking and try your action again.
172 171 ** If that fixes the bug please report it to the extension author.
173 172 ** Python * (glob)
174 173 ** Mercurial Distributed SCM (*) (glob)
175 174 ** Extensions loaded: * (glob)
176 175 Traceback (most recent call last):
177 176 mercurial.error.ProgrammingError: transaction requires locking
178 177
179 178 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now