upgrade: extract code in its own module...
Pierre-Yves David
r31864:70d163b8 default
@@ -1,2115 +1,2116
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import difflib
11 11 import errno
12 12 import operator
13 13 import os
14 14 import random
15 15 import socket
16 16 import string
17 17 import sys
18 18 import tempfile
19 19 import time
20 20
21 21 from .i18n import _
22 22 from .node import (
23 23 bin,
24 24 hex,
25 25 nullhex,
26 26 nullid,
27 27 nullrev,
28 28 short,
29 29 )
30 30 from . import (
31 31 bundle2,
32 32 changegroup,
33 33 cmdutil,
34 34 color,
35 35 commands,
36 36 context,
37 37 dagparser,
38 38 dagutil,
39 39 encoding,
40 40 error,
41 41 exchange,
42 42 extensions,
43 43 fileset,
44 44 formatter,
45 45 hg,
46 46 localrepo,
47 47 lock as lockmod,
48 48 merge as mergemod,
49 49 obsolete,
50 50 policy,
51 51 pvec,
52 52 pycompat,
53 53 repair,
54 54 revlog,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 setdiscovery,
59 59 simplemerge,
60 60 smartset,
61 61 sslutil,
62 62 streamclone,
63 63 templater,
64 64 treediscovery,
65 upgrade,
65 66 util,
66 67 vfs as vfsmod,
67 68 )
68 69
69 70 release = lockmod.release
70 71
71 72 # We reuse the command table from commands because it is easier than
72 73 # teaching dispatch about multiple tables.
73 74 command = cmdutil.command(commands.table)
74 75
75 76 @command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
76 77 def debugancestor(ui, repo, *args):
77 78 """find the ancestor revision of two revisions in a given index"""
78 79 if len(args) == 3:
79 80 index, rev1, rev2 = args
80 81 r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
81 82 lookup = r.lookup
82 83 elif len(args) == 2:
83 84 if not repo:
84 85 raise error.Abort(_('there is no Mercurial repository here '
85 86 '(.hg not found)'))
86 87 rev1, rev2 = args
87 88 r = repo.changelog
88 89 lookup = repo.lookup
89 90 else:
90 91 raise error.Abort(_('either two or three arguments required'))
91 92 a = r.ancestor(lookup(rev1), lookup(rev2))
92 93 ui.write('%d:%s\n' % (r.rev(a), hex(a)))
93 94
94 95 @command('debugapplystreamclonebundle', [], 'FILE')
95 96 def debugapplystreamclonebundle(ui, repo, fname):
96 97 """apply a stream clone bundle file"""
97 98 f = hg.openpath(ui, fname)
98 99 gen = exchange.readbundle(ui, f, fname)
99 100 gen.apply(repo)
100 101
101 102 @command('debugbuilddag',
102 103 [('m', 'mergeable-file', None, _('add single file mergeable changes')),
103 104 ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
104 105 ('n', 'new-file', None, _('add new file at each rev'))],
105 106 _('[OPTION]... [TEXT]'))
106 107 def debugbuilddag(ui, repo, text=None,
107 108 mergeable_file=False,
108 109 overwritten_file=False,
109 110 new_file=False):
110 111 """builds a repo with a given DAG from scratch in the current empty repo
111 112
112 113 The description of the DAG is read from stdin if not given on the
113 114 command line.
114 115
115 116 Elements:
116 117
117 118 - "+n" is a linear run of n nodes based on the current default parent
118 119 - "." is a single node based on the current default parent
119 120 - "$" resets the default parent to null (implied at the start);
120 121 otherwise the default parent is always the last node created
121 122 - "<p" sets the default parent to the backref p
122 123 - "*p" is a fork at parent p, which is a backref
123 124 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
124 125 - "/p2" is a merge of the preceding node and p2
125 126 - ":tag" defines a local tag for the preceding node
126 127 - "@branch" sets the named branch for subsequent nodes
127 128 - "#...\\n" is a comment up to the end of the line
128 129
129 130 Whitespace between the above elements is ignored.
130 131
131 132 A backref is either
132 133
133 134 - a number n, which references the node curr-n, where curr is the current
134 135 node, or
135 136 - the name of a local tag you placed earlier using ":tag", or
136 137 - empty to denote the default parent.
137 138
138 139     All string-valued elements are either strictly alphanumeric, or must
139 140 be enclosed in double quotes ("..."), with "\\" as escape character.
140 141 """
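    # Editor's illustration (not part of this changeset): with the grammar
    # above, a spec such as
    #
    #   hg debugbuilddag '+2:a $ +3 /a @stable +1'
    #
    # creates two commits (tagging the second "a"), resets the default
    # parent to null, grows a three-commit chain from a new root, merges
    # its tip with "a", then adds one commit on the named branch "stable".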
141 142
142 143 if text is None:
143 144 ui.status(_("reading DAG from stdin\n"))
144 145 text = ui.fin.read()
145 146
146 147 cl = repo.changelog
147 148 if len(cl) > 0:
148 149 raise error.Abort(_('repository is not empty'))
149 150
150 151 # determine number of revs in DAG
151 152 total = 0
152 153 for type, data in dagparser.parsedag(text):
153 154 if type == 'n':
154 155 total += 1
155 156
156 157 if mergeable_file:
157 158 linesperrev = 2
158 159 # make a file with k lines per rev
159 160 initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
160 161 initialmergedlines.append("")
161 162
162 163 tags = []
163 164
164 165 wlock = lock = tr = None
165 166 try:
166 167 wlock = repo.wlock()
167 168 lock = repo.lock()
168 169 tr = repo.transaction("builddag")
169 170
170 171 at = -1
171 172 atbranch = 'default'
172 173 nodeids = []
173 174 id = 0
174 175 ui.progress(_('building'), id, unit=_('revisions'), total=total)
175 176 for type, data in dagparser.parsedag(text):
176 177 if type == 'n':
177 178 ui.note(('node %s\n' % str(data)))
178 179 id, ps = data
179 180
180 181 files = []
181 182 fctxs = {}
182 183
183 184 p2 = None
184 185 if mergeable_file:
185 186 fn = "mf"
186 187 p1 = repo[ps[0]]
187 188 if len(ps) > 1:
188 189 p2 = repo[ps[1]]
189 190 pa = p1.ancestor(p2)
190 191 base, local, other = [x[fn].data() for x in (pa, p1,
191 192 p2)]
192 193 m3 = simplemerge.Merge3Text(base, local, other)
193 194 ml = [l.strip() for l in m3.merge_lines()]
194 195 ml.append("")
195 196 elif at > 0:
196 197 ml = p1[fn].data().split("\n")
197 198 else:
198 199 ml = initialmergedlines
199 200 ml[id * linesperrev] += " r%i" % id
200 201 mergedtext = "\n".join(ml)
201 202 files.append(fn)
202 203 fctxs[fn] = context.memfilectx(repo, fn, mergedtext)
203 204
204 205 if overwritten_file:
205 206 fn = "of"
206 207 files.append(fn)
207 208 fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
208 209
209 210 if new_file:
210 211 fn = "nf%i" % id
211 212 files.append(fn)
212 213 fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
213 214 if len(ps) > 1:
214 215 if not p2:
215 216 p2 = repo[ps[1]]
216 217 for fn in p2:
217 218 if fn.startswith("nf"):
218 219 files.append(fn)
219 220 fctxs[fn] = p2[fn]
220 221
221 222 def fctxfn(repo, cx, path):
222 223 return fctxs.get(path)
223 224
224 225 if len(ps) == 0 or ps[0] < 0:
225 226 pars = [None, None]
226 227 elif len(ps) == 1:
227 228 pars = [nodeids[ps[0]], None]
228 229 else:
229 230 pars = [nodeids[p] for p in ps]
230 231 cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
231 232 date=(id, 0),
232 233 user="debugbuilddag",
233 234 extra={'branch': atbranch})
234 235 nodeid = repo.commitctx(cx)
235 236 nodeids.append(nodeid)
236 237 at = id
237 238 elif type == 'l':
238 239 id, name = data
239 240 ui.note(('tag %s\n' % name))
240 241 tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
241 242 elif type == 'a':
242 243 ui.note(('branch %s\n' % data))
243 244 atbranch = data
244 245 ui.progress(_('building'), id, unit=_('revisions'), total=total)
245 246 tr.close()
246 247
247 248 if tags:
248 249 repo.vfs.write("localtags", "".join(tags))
249 250 finally:
250 251 ui.progress(_('building'), None)
251 252 release(tr, lock, wlock)
252 253
253 254 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
254 255 indent_string = ' ' * indent
255 256 if all:
256 257 ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
257 258 % indent_string)
258 259
259 260 def showchunks(named):
260 261 ui.write("\n%s%s\n" % (indent_string, named))
261 262 chain = None
262 263 for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
263 264 node = chunkdata['node']
264 265 p1 = chunkdata['p1']
265 266 p2 = chunkdata['p2']
266 267 cs = chunkdata['cs']
267 268 deltabase = chunkdata['deltabase']
268 269 delta = chunkdata['delta']
269 270 ui.write("%s%s %s %s %s %s %s\n" %
270 271 (indent_string, hex(node), hex(p1), hex(p2),
271 272 hex(cs), hex(deltabase), len(delta)))
272 273 chain = node
273 274
274 275 chunkdata = gen.changelogheader()
275 276 showchunks("changelog")
276 277 chunkdata = gen.manifestheader()
277 278 showchunks("manifest")
278 279 for chunkdata in iter(gen.filelogheader, {}):
279 280 fname = chunkdata['filename']
280 281 showchunks(fname)
281 282 else:
282 283 if isinstance(gen, bundle2.unbundle20):
283 284 raise error.Abort(_('use debugbundle2 for this file'))
284 285 chunkdata = gen.changelogheader()
285 286 chain = None
286 287 for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
287 288 node = chunkdata['node']
288 289 ui.write("%s%s\n" % (indent_string, hex(node)))
289 290 chain = node
290 291
291 292 def _debugbundle2(ui, gen, all=None, **opts):
292 293 """lists the contents of a bundle2"""
293 294 if not isinstance(gen, bundle2.unbundle20):
294 295 raise error.Abort(_('not a bundle2 file'))
295 296 ui.write(('Stream params: %s\n' % repr(gen.params)))
296 297 for part in gen.iterparts():
297 298 ui.write('%s -- %r\n' % (part.type, repr(part.params)))
298 299 if part.type == 'changegroup':
299 300 version = part.params.get('version', '01')
300 301 cg = changegroup.getunbundler(version, part, 'UN')
301 302 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
302 303
303 304 @command('debugbundle',
304 305 [('a', 'all', None, _('show all details')),
305 306 ('', 'spec', None, _('print the bundlespec of the bundle'))],
306 307 _('FILE'),
307 308 norepo=True)
308 309 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
309 310 """lists the contents of a bundle"""
310 311 with hg.openpath(ui, bundlepath) as f:
311 312 if spec:
312 313 spec = exchange.getbundlespec(ui, f)
313 314 ui.write('%s\n' % spec)
314 315 return
315 316
316 317 gen = exchange.readbundle(ui, f, bundlepath)
317 318 if isinstance(gen, bundle2.unbundle20):
318 319 return _debugbundle2(ui, gen, all=all, **opts)
319 320 _debugchangegroup(ui, gen, all=all, **opts)
320 321
321 322 @command('debugcheckstate', [], '')
322 323 def debugcheckstate(ui, repo):
323 324 """validate the correctness of the current dirstate"""
324 325 parent1, parent2 = repo.dirstate.parents()
325 326 m1 = repo[parent1].manifest()
326 327 m2 = repo[parent2].manifest()
327 328 errors = 0
328 329 for f in repo.dirstate:
329 330 state = repo.dirstate[f]
330 331 if state in "nr" and f not in m1:
331 332 ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
332 333 errors += 1
333 334 if state in "a" and f in m1:
334 335 ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
335 336 errors += 1
336 337 if state in "m" and f not in m1 and f not in m2:
337 338 ui.warn(_("%s in state %s, but not in either manifest\n") %
338 339 (f, state))
339 340 errors += 1
340 341 for f in m1:
341 342 state = repo.dirstate[f]
342 343 if state not in "nrm":
343 344 ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
344 345 errors += 1
345 346 if errors:
346 347         errstr = _(".hg/dirstate inconsistent with current parent's manifest")
347 348         raise error.Abort(errstr)
348 349
349 350 @command('debugcolor',
350 351 [('', 'style', None, _('show all configured styles'))],
351 352 'hg debugcolor')
352 353 def debugcolor(ui, repo, **opts):
353 354     """show available colors, effects or styles"""
354 355 ui.write(('color mode: %s\n') % ui._colormode)
355 356 if opts.get('style'):
356 357 return _debugdisplaystyle(ui)
357 358 else:
358 359 return _debugdisplaycolor(ui)
359 360
360 361 def _debugdisplaycolor(ui):
361 362 ui = ui.copy()
362 363 ui._styles.clear()
363 364 for effect in color._activeeffects(ui).keys():
364 365 ui._styles[effect] = effect
365 366 if ui._terminfoparams:
366 367 for k, v in ui.configitems('color'):
367 368 if k.startswith('color.'):
368 369 ui._styles[k] = k[6:]
369 370 elif k.startswith('terminfo.'):
370 371 ui._styles[k] = k[9:]
371 372 ui.write(_('available colors:\n'))
372 373     # sort labels with '_' after the others to group the '_background' entries.
373 374 items = sorted(ui._styles.items(),
374 375 key=lambda i: ('_' in i[0], i[0], i[1]))
375 376 for colorname, label in items:
376 377 ui.write(('%s\n') % colorname, label=label)
377 378
378 379 def _debugdisplaystyle(ui):
379 380     ui.write(_('available styles:\n'))
380 381 width = max(len(s) for s in ui._styles)
381 382 for label, effects in sorted(ui._styles.items()):
382 383 ui.write('%s' % label, label=label)
383 384 if effects:
384 385             # pad to the widest label so the effect lists line up
385 386 ui.write(': ')
386 387 ui.write(' ' * (max(0, width - len(label))))
387 388 ui.write(', '.join(ui.label(e, e) for e in effects.split()))
388 389 ui.write('\n')
389 390
390 391 @command('debugcommands', [], _('[COMMAND]'), norepo=True)
391 392 def debugcommands(ui, cmd='', *args):
392 393 """list all available commands and options"""
393 394 for cmd, vals in sorted(commands.table.iteritems()):
394 395 cmd = cmd.split('|')[0].strip('^')
395 396 opts = ', '.join([i[1] for i in vals[1]])
396 397 ui.write('%s: %s\n' % (cmd, opts))
397 398
398 399 @command('debugcomplete',
399 400 [('o', 'options', None, _('show the command options'))],
400 401 _('[-o] CMD'),
401 402 norepo=True)
402 403 def debugcomplete(ui, cmd='', **opts):
403 404 """returns the completion list associated with the given command"""
404 405
405 406 if opts.get('options'):
406 407 options = []
407 408 otables = [commands.globalopts]
408 409 if cmd:
409 410 aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
410 411 otables.append(entry[1])
411 412 for t in otables:
412 413 for o in t:
413 414 if "(DEPRECATED)" in o[3]:
414 415 continue
415 416 if o[0]:
416 417 options.append('-%s' % o[0])
417 418 options.append('--%s' % o[1])
418 419 ui.write("%s\n" % "\n".join(options))
419 420 return
420 421
421 422 cmdlist, unused_allcmds = cmdutil.findpossible(cmd, commands.table)
422 423 if ui.verbose:
423 424 cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
424 425 ui.write("%s\n" % "\n".join(sorted(cmdlist)))
425 426
426 427 @command('debugcreatestreamclonebundle', [], 'FILE')
427 428 def debugcreatestreamclonebundle(ui, repo, fname):
428 429 """create a stream clone bundle file
429 430
430 431 Stream bundles are special bundles that are essentially archives of
431 432 revlog files. They are commonly used for cloning very quickly.
432 433 """
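    # Editor's illustration (not part of this changeset): a bundle written
    # here can be restored with debugapplystreamclonebundle, defined above:
    #
    #   hg debugcreatestreamclonebundle ../repo-stream.hg
    #   hg debugapplystreamclonebundle ../repo-stream.hg
    #
    # ("../repo-stream.hg" is a hypothetical file name)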
433 434 requirements, gen = streamclone.generatebundlev1(repo)
434 435 changegroup.writechunks(ui, gen, fname)
435 436
436 437 ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
437 438
438 439 @command('debugdag',
439 440 [('t', 'tags', None, _('use tags as labels')),
440 441 ('b', 'branches', None, _('annotate with branch names')),
441 442 ('', 'dots', None, _('use dots for runs')),
442 443 ('s', 'spaces', None, _('separate elements by spaces'))],
443 444 _('[OPTION]... [FILE [REV]...]'),
444 445 optionalrepo=True)
445 446 def debugdag(ui, repo, file_=None, *revs, **opts):
446 447 """format the changelog or an index DAG as a concise textual description
447 448
448 449 If you pass a revlog index, the revlog's DAG is emitted. If you list
449 450 revision numbers, they get labeled in the output as rN.
450 451
451 452 Otherwise, the changelog DAG of the current repo is emitted.
452 453 """
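    # Editor's illustration (not part of this changeset): on a repository,
    # "hg debugdag -t -b" emits the changelog in the same concise text
    # format that debugbuilddag consumes, annotated with tags and branches.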
453 454 spaces = opts.get('spaces')
454 455 dots = opts.get('dots')
455 456 if file_:
456 457 rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
457 458 file_)
458 459 revs = set((int(r) for r in revs))
459 460 def events():
460 461 for r in rlog:
461 462 yield 'n', (r, list(p for p in rlog.parentrevs(r)
462 463 if p != -1))
463 464 if r in revs:
464 465 yield 'l', (r, "r%i" % r)
465 466 elif repo:
466 467 cl = repo.changelog
467 468 tags = opts.get('tags')
468 469 branches = opts.get('branches')
469 470 if tags:
470 471 labels = {}
471 472 for l, n in repo.tags().items():
472 473 labels.setdefault(cl.rev(n), []).append(l)
473 474 def events():
474 475 b = "default"
475 476 for r in cl:
476 477 if branches:
477 478 newb = cl.read(cl.node(r))[5]['branch']
478 479 if newb != b:
479 480 yield 'a', newb
480 481 b = newb
481 482 yield 'n', (r, list(p for p in cl.parentrevs(r)
482 483 if p != -1))
483 484 if tags:
484 485 ls = labels.get(r)
485 486 if ls:
486 487 for l in ls:
487 488 yield 'l', (r, l)
488 489 else:
489 490 raise error.Abort(_('need repo for changelog dag'))
490 491
491 492 for line in dagparser.dagtextlines(events(),
492 493 addspaces=spaces,
493 494 wraplabels=True,
494 495 wrapannotations=True,
495 496 wrapnonlinear=dots,
496 497 usedots=dots,
497 498 maxlinewidth=70):
498 499 ui.write(line)
499 500 ui.write("\n")
500 501
501 502 @command('debugdata', commands.debugrevlogopts, _('-c|-m|FILE REV'))
502 503 def debugdata(ui, repo, file_, rev=None, **opts):
503 504 """dump the contents of a data file revision"""
504 505 if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
505 506 if rev is not None:
506 507 raise error.CommandError('debugdata', _('invalid arguments'))
507 508 file_, rev = None, file_
508 509 elif rev is None:
509 510 raise error.CommandError('debugdata', _('invalid arguments'))
510 511 r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
511 512 try:
512 513 ui.write(r.revision(r.lookup(rev), raw=True))
513 514 except KeyError:
514 515 raise error.Abort(_('invalid revision identifier %s') % rev)
515 516
516 517 @command('debugdate',
517 518 [('e', 'extended', None, _('try extended date formats'))],
518 519 _('[-e] DATE [RANGE]'),
519 520 norepo=True, optionalrepo=True)
520 521 def debugdate(ui, date, range=None, **opts):
521 522 """parse and display a date"""
522 523 if opts["extended"]:
523 524 d = util.parsedate(date, util.extendeddateformats)
524 525 else:
525 526 d = util.parsedate(date)
526 527 ui.write(("internal: %s %s\n") % d)
527 528 ui.write(("standard: %s\n") % util.datestr(d))
528 529 if range:
529 530 m = util.matchdate(range)
530 531 ui.write(("match: %s\n") % m(d[0]))
531 532
532 533 @command('debugdeltachain',
533 534 commands.debugrevlogopts + commands.formatteropts,
534 535 _('-c|-m|FILE'),
535 536 optionalrepo=True)
536 537 def debugdeltachain(ui, repo, file_=None, **opts):
537 538 """dump information about delta chains in a revlog
538 539
539 540 Output can be templatized. Available template keywords are:
540 541
541 542 :``rev``: revision number
542 543 :``chainid``: delta chain identifier (numbered by unique base)
543 544 :``chainlen``: delta chain length to this revision
544 545 :``prevrev``: previous revision in delta chain
545 546 :``deltatype``: role of delta / how it was computed
546 547 :``compsize``: compressed size of revision
547 548 :``uncompsize``: uncompressed size of revision
548 549 :``chainsize``: total size of compressed revisions in chain
549 550 :``chainratio``: total chain size divided by uncompressed revision size
550 551 (new delta chains typically start at ratio 2.00)
551 552 :``lindist``: linear distance from base revision in delta chain to end
552 553 of this revision
553 554 :``extradist``: total size of revisions not part of this delta chain from
554 555 base of delta chain to end of this revision; a measurement
555 556 of how much extra data we need to read/seek across to read
556 557 the delta chain for this revision
557 558 :``extraratio``: extradist divided by chainsize; another representation of
558 559 how much unrelated data is needed to load this delta chain
559 560 """
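    # Editor's illustration (not part of this changeset): combined with the
    # template keywords above, a call such as
    #
    #   hg debugdeltachain -m -T '{rev} {chainid} {chainlen} {chainratio}\n'
    #
    # limits the manifest revlog report to a few columns.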
560 561 r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
561 562 index = r.index
562 563 generaldelta = r.version & revlog.REVLOGGENERALDELTA
563 564
564 565 def revinfo(rev):
565 566 e = index[rev]
566 567 compsize = e[1]
567 568 uncompsize = e[2]
568 569 chainsize = 0
569 570
570 571 if generaldelta:
571 572 if e[3] == e[5]:
572 573 deltatype = 'p1'
573 574 elif e[3] == e[6]:
574 575 deltatype = 'p2'
575 576 elif e[3] == rev - 1:
576 577 deltatype = 'prev'
577 578 elif e[3] == rev:
578 579 deltatype = 'base'
579 580 else:
580 581 deltatype = 'other'
581 582 else:
582 583 if e[3] == rev:
583 584 deltatype = 'base'
584 585 else:
585 586 deltatype = 'prev'
586 587
587 588 chain = r._deltachain(rev)[0]
588 589 for iterrev in chain:
589 590 e = index[iterrev]
590 591 chainsize += e[1]
591 592
592 593 return compsize, uncompsize, deltatype, chain, chainsize
593 594
594 595 fm = ui.formatter('debugdeltachain', opts)
595 596
596 597 fm.plain(' rev chain# chainlen prev delta '
597 598 'size rawsize chainsize ratio lindist extradist '
598 599 'extraratio\n')
599 600
600 601 chainbases = {}
601 602 for rev in r:
602 603 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
603 604 chainbase = chain[0]
604 605 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
605 606 basestart = r.start(chainbase)
606 607 revstart = r.start(rev)
607 608 lineardist = revstart + comp - basestart
608 609 extradist = lineardist - chainsize
609 610 try:
610 611 prevrev = chain[-2]
611 612 except IndexError:
612 613 prevrev = -1
613 614
614 615 chainratio = float(chainsize) / float(uncomp)
615 616 extraratio = float(extradist) / float(chainsize)
616 617
617 618 fm.startitem()
618 619 fm.write('rev chainid chainlen prevrev deltatype compsize '
619 620 'uncompsize chainsize chainratio lindist extradist '
620 621 'extraratio',
621 622 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
622 623 rev, chainid, len(chain), prevrev, deltatype, comp,
623 624 uncomp, chainsize, chainratio, lineardist, extradist,
624 625 extraratio,
625 626 rev=rev, chainid=chainid, chainlen=len(chain),
626 627 prevrev=prevrev, deltatype=deltatype, compsize=comp,
627 628 uncompsize=uncomp, chainsize=chainsize,
628 629 chainratio=chainratio, lindist=lineardist,
629 630 extradist=extradist, extraratio=extraratio)
630 631
631 632 fm.end()
632 633
633 634 @command('debugdirstate|debugstate',
634 635 [('', 'nodates', None, _('do not display the saved mtime')),
635 636 ('', 'datesort', None, _('sort by saved mtime'))],
636 637 _('[OPTION]...'))
637 638 def debugstate(ui, repo, **opts):
638 639 """show the contents of the current dirstate"""
639 640
640 641 nodates = opts.get('nodates')
641 642 datesort = opts.get('datesort')
642 643
643 644 timestr = ""
644 645 if datesort:
645 646 keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
646 647 else:
647 648 keyfunc = None # sort by filename
648 649 for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
649 650 if ent[3] == -1:
650 651 timestr = 'unset '
651 652 elif nodates:
652 653 timestr = 'set '
653 654 else:
654 655 timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
655 656 time.localtime(ent[3]))
656 657 if ent[1] & 0o20000:
657 658 mode = 'lnk'
658 659 else:
659 660 mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
660 661 ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
661 662 for f in repo.dirstate.copies():
662 663 ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
663 664
664 665 @command('debugdiscovery',
665 666 [('', 'old', None, _('use old-style discovery')),
666 667 ('', 'nonheads', None,
667 668 _('use old-style discovery with non-heads included')),
668 669 ] + commands.remoteopts,
669 670 _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
670 671 def debugdiscovery(ui, repo, remoteurl="default", **opts):
671 672 """runs the changeset discovery protocol in isolation"""
672 673 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
673 674 opts.get('branch'))
674 675 remote = hg.peer(repo, opts, remoteurl)
675 676 ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))
676 677
677 678 # make sure tests are repeatable
678 679 random.seed(12323)
679 680
680 681 def doit(localheads, remoteheads, remote=remote):
681 682 if opts.get('old'):
682 683 if localheads:
683 684 raise error.Abort('cannot use localheads with old style '
684 685 'discovery')
685 686 if not util.safehasattr(remote, 'branches'):
686 687 # enable in-client legacy support
687 688 remote = localrepo.locallegacypeer(remote.local())
688 689 common, _in, hds = treediscovery.findcommonincoming(repo, remote,
689 690 force=True)
690 691 common = set(common)
691 692 if not opts.get('nonheads'):
692 693 ui.write(("unpruned common: %s\n") %
693 694 " ".join(sorted(short(n) for n in common)))
694 695 dag = dagutil.revlogdag(repo.changelog)
695 696 all = dag.ancestorset(dag.internalizeall(common))
696 697 common = dag.externalizeall(dag.headsetofconnecteds(all))
697 698 else:
698 699 common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
699 700 common = set(common)
700 701 rheads = set(hds)
701 702 lheads = set(repo.heads())
702 703 ui.write(("common heads: %s\n") %
703 704 " ".join(sorted(short(n) for n in common)))
704 705 if lheads <= common:
705 706 ui.write(("local is subset\n"))
706 707 elif rheads <= common:
707 708 ui.write(("remote is subset\n"))
708 709
709 710 serverlogs = opts.get('serverlog')
710 711 if serverlogs:
711 712 for filename in serverlogs:
712 713 with open(filename, 'r') as logfile:
713 714 line = logfile.readline()
714 715 while line:
715 716 parts = line.strip().split(';')
716 717 op = parts[1]
717 718 if op == 'cg':
718 719 pass
719 720 elif op == 'cgss':
720 721 doit(parts[2].split(' '), parts[3].split(' '))
721 722 elif op == 'unb':
722 723 doit(parts[3].split(' '), parts[2].split(' '))
723 724 line = logfile.readline()
724 725 else:
725 726 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
726 727 opts.get('remote_head'))
727 728 localrevs = opts.get('local_head')
728 729 doit(localrevs, remoterevs)
729 730
730 731 @command('debugextensions', commands.formatteropts, [], norepo=True)
731 732 def debugextensions(ui, **opts):
732 733 '''show information about active extensions'''
733 734 exts = extensions.extensions(ui)
734 735 hgver = util.version()
735 736 fm = ui.formatter('debugextensions', opts)
736 737 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
737 738 isinternal = extensions.ismoduleinternal(extmod)
738 739 extsource = pycompat.fsencode(extmod.__file__)
739 740 if isinternal:
740 741 exttestedwith = [] # never expose magic string to users
741 742 else:
742 743 exttestedwith = getattr(extmod, 'testedwith', '').split()
743 744 extbuglink = getattr(extmod, 'buglink', None)
744 745
745 746 fm.startitem()
746 747
747 748 if ui.quiet or ui.verbose:
748 749 fm.write('name', '%s\n', extname)
749 750 else:
750 751 fm.write('name', '%s', extname)
751 752 if isinternal or hgver in exttestedwith:
752 753 fm.plain('\n')
753 754 elif not exttestedwith:
754 755 fm.plain(_(' (untested!)\n'))
755 756 else:
756 757 lasttestedversion = exttestedwith[-1]
757 758 fm.plain(' (%s!)\n' % lasttestedversion)
758 759
759 760 fm.condwrite(ui.verbose and extsource, 'source',
760 761 _(' location: %s\n'), extsource or "")
761 762
762 763 if ui.verbose:
763 764 fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal])
764 765 fm.data(bundled=isinternal)
765 766
766 767 fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
767 768 _(' tested with: %s\n'),
768 769 fm.formatlist(exttestedwith, name='ver'))
769 770
770 771 fm.condwrite(ui.verbose and extbuglink, 'buglink',
771 772 _(' bug reporting: %s\n'), extbuglink or "")
772 773
773 774 fm.end()
774 775
775 776 @command('debugfileset',
776 777 [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
777 778 _('[-r REV] FILESPEC'))
778 779 def debugfileset(ui, repo, expr, **opts):
779 780 '''parse and apply a fileset specification'''
780 781 ctx = scmutil.revsingle(repo, opts.get('rev'), None)
781 782 if ui.verbose:
782 783 tree = fileset.parse(expr)
783 784 ui.note(fileset.prettyformat(tree), "\n")
784 785
785 786 for f in ctx.getfileset(expr):
786 787 ui.write("%s\n" % f)
787 788
788 789 @command('debugfsinfo', [], _('[PATH]'), norepo=True)
789 790 def debugfsinfo(ui, path="."):
790 791 """show information detected about current filesystem"""
791 792 ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
792 793 ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
793 794 ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
794 795 ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
795 796 casesensitive = '(unknown)'
796 797 try:
797 798 with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f:
798 799 casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
799 800 except OSError:
800 801 pass
801 802 ui.write(('case-sensitive: %s\n') % casesensitive)
802 803
803 804 @command('debuggetbundle',
804 805 [('H', 'head', [], _('id of head node'), _('ID')),
805 806 ('C', 'common', [], _('id of common node'), _('ID')),
806 807 ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
807 808 _('REPO FILE [-H|-C ID]...'),
808 809 norepo=True)
809 810 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
810 811 """retrieves a bundle from a repo
811 812
812 813 Every ID must be a full-length hex node id string. Saves the bundle to the
813 814 given file.
814 815 """
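    # Editor's illustration (not part of this changeset): fetching all
    # ancestors of one head into a bundle2 file might look like
    #
    #   hg debuggetbundle http://example.com/repo out.hg \
    #       -H 0123456789abcdef0123456789abcdef01234567 -t bundle2
    #
    # (the URL, file name and node id are hypothetical)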
815 816 repo = hg.peer(ui, opts, repopath)
816 817 if not repo.capable('getbundle'):
817 818 raise error.Abort("getbundle() not supported by target repository")
818 819 args = {}
819 820 if common:
820 821 args['common'] = [bin(s) for s in common]
821 822 if head:
822 823 args['heads'] = [bin(s) for s in head]
823 824 # TODO: get desired bundlecaps from command line.
824 825 args['bundlecaps'] = None
825 826 bundle = repo.getbundle('debug', **args)
826 827
827 828 bundletype = opts.get('type', 'bzip2').lower()
828 829 btypes = {'none': 'HG10UN',
829 830 'bzip2': 'HG10BZ',
830 831 'gzip': 'HG10GZ',
831 832 'bundle2': 'HG20'}
832 833 bundletype = btypes.get(bundletype)
833 834 if bundletype not in bundle2.bundletypes:
834 835 raise error.Abort(_('unknown bundle type specified with --type'))
835 836 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
836 837
837 838 @command('debugignore', [], '[FILE]')
838 839 def debugignore(ui, repo, *files, **opts):
839 840 """display the combined ignore pattern and information about ignored files
840 841
841 842     With no argument, display the combined ignore pattern.
842 843 
843 844     Given space-separated file names, show whether each file is ignored
844 845     and, if so, the ignore rule (file and line number) that matched it.
845 846 """
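    # Editor's illustration (not part of this changeset): per the write
    # calls below, querying an ignored file prints lines shaped like
    #
    #   build/output.o is ignored
    #   (ignore rule in .hgignore, line 2: '*.o')
    #
    # (the file name, line number and pattern are hypothetical)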
846 847 ignore = repo.dirstate._ignore
847 848 if not files:
848 849 # Show all the patterns
849 850 includepat = getattr(ignore, 'includepat', None)
850 851 if includepat is not None:
851 852 ui.write("%s\n" % includepat)
852 853 else:
853 854 raise error.Abort(_("no ignore patterns found"))
854 855 else:
855 856 for f in files:
856 857 nf = util.normpath(f)
857 858 ignored = None
858 859 ignoredata = None
859 860 if nf != '.':
860 861 if ignore(nf):
861 862 ignored = nf
862 863 ignoredata = repo.dirstate._ignorefileandline(nf)
863 864 else:
864 865 for p in util.finddirs(nf):
865 866 if ignore(p):
866 867 ignored = p
867 868 ignoredata = repo.dirstate._ignorefileandline(p)
868 869 break
869 870 if ignored:
870 871 if ignored == nf:
871 872 ui.write(_("%s is ignored\n") % f)
872 873 else:
873 874 ui.write(_("%s is ignored because of "
874 875 "containing folder %s\n")
875 876 % (f, ignored))
876 877 ignorefile, lineno, line = ignoredata
877 878 ui.write(_("(ignore rule in %s, line %d: '%s')\n")
878 879 % (ignorefile, lineno, line))
879 880 else:
880 881 ui.write(_("%s is not ignored\n") % f)
881 882
882 883 @command('debugindex', commands.debugrevlogopts +
883 884 [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
884 885 _('[-f FORMAT] -c|-m|FILE'),
885 886 optionalrepo=True)
886 887 def debugindex(ui, repo, file_=None, **opts):
887 888 """dump the contents of an index file"""
888 889 r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
889 890 format = opts.get('format', 0)
890 891 if format not in (0, 1):
891 892 raise error.Abort(_("unknown format %d") % format)
892 893
893 894 generaldelta = r.version & revlog.REVLOGGENERALDELTA
894 895 if generaldelta:
895 896 basehdr = ' delta'
896 897 else:
897 898 basehdr = ' base'
898 899
899 900 if ui.debugflag:
900 901 shortfn = hex
901 902 else:
902 903 shortfn = short
903 904
904 905 # There might not be anything in r, so have a sane default
905 906 idlen = 12
906 907 for i in r:
907 908 idlen = len(shortfn(r.node(i)))
908 909 break
909 910
910 911 if format == 0:
911 912 ui.write((" rev offset length " + basehdr + " linkrev"
912 913 " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
913 914 elif format == 1:
914 915 ui.write((" rev flag offset length"
915 916 " size " + basehdr + " link p1 p2"
916 917 " %s\n") % "nodeid".rjust(idlen))
917 918
918 919 for i in r:
919 920 node = r.node(i)
920 921 if generaldelta:
921 922 base = r.deltaparent(i)
922 923 else:
923 924 base = r.chainbase(i)
924 925 if format == 0:
925 926 try:
926 927 pp = r.parents(node)
927 928 except Exception:
928 929 pp = [nullid, nullid]
929 930 ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
930 931 i, r.start(i), r.length(i), base, r.linkrev(i),
931 932 shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
932 933 elif format == 1:
933 934 pr = r.parentrevs(i)
934 935 ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
935 936 i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
936 937 base, r.linkrev(i), pr[0], pr[1], shortfn(node)))
937 938
938 939 @command('debugindexdot', commands.debugrevlogopts,
939 940 _('-c|-m|FILE'), optionalrepo=True)
940 941 def debugindexdot(ui, repo, file_=None, **opts):
941 942 """dump an index DAG as a graphviz dot file"""
942 943 r = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
943 944 ui.write(("digraph G {\n"))
944 945 for i in r:
945 946 node = r.node(i)
946 947 pp = r.parents(node)
947 948 ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
948 949 if pp[1] != nullid:
949 950 ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
950 951 ui.write("}\n")
951 952
952 953 @command('debuginstall', [] + commands.formatteropts, '', norepo=True)
953 954 def debuginstall(ui, **opts):
954 955 '''test Mercurial installation
955 956
956 957 Returns 0 on success.
957 958 '''
958 959
959 960 def writetemp(contents):
960 961 (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
961 962 f = os.fdopen(fd, pycompat.sysstr("wb"))
962 963 f.write(contents)
963 964 f.close()
964 965 return name
965 966
966 967 problems = 0
967 968
968 969 fm = ui.formatter('debuginstall', opts)
969 970 fm.startitem()
970 971
971 972 # encoding
972 973 fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
973 974 err = None
974 975 try:
975 976 encoding.fromlocal("test")
976 977 except error.Abort as inst:
977 978 err = inst
978 979 problems += 1
979 980 fm.condwrite(err, 'encodingerror', _(" %s\n"
980 981 " (check that your locale is properly set)\n"), err)
981 982
982 983 # Python
983 984 fm.write('pythonexe', _("checking Python executable (%s)\n"),
984 985 pycompat.sysexecutable)
985 986 fm.write('pythonver', _("checking Python version (%s)\n"),
986 987 ("%d.%d.%d" % sys.version_info[:3]))
987 988 fm.write('pythonlib', _("checking Python lib (%s)...\n"),
988 989 os.path.dirname(pycompat.fsencode(os.__file__)))
989 990
990 991 security = set(sslutil.supportedprotocols)
991 992 if sslutil.hassni:
992 993 security.add('sni')
993 994
994 995 fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
995 996 fm.formatlist(sorted(security), name='protocol',
996 997 fmt='%s', sep=','))
997 998
998 999 # These are warnings, not errors. So don't increment problem count. This
999 1000 # may change in the future.
1000 1001 if 'tls1.2' not in security:
1001 1002 fm.plain(_(' TLS 1.2 not supported by Python install; '
1002 1003 'network connections lack modern security\n'))
1003 1004 if 'sni' not in security:
1004 1005 fm.plain(_(' SNI not supported by Python install; may have '
1005 1006 'connectivity issues with some servers\n'))
1006 1007
1007 1008 # TODO print CA cert info
1008 1009
1009 1010 # hg version
1010 1011 hgver = util.version()
1011 1012 fm.write('hgver', _("checking Mercurial version (%s)\n"),
1012 1013 hgver.split('+')[0])
1013 1014 fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
1014 1015 '+'.join(hgver.split('+')[1:]))
1015 1016
1016 1017 # compiled modules
1017 1018 fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
1018 1019 policy.policy)
1019 1020 fm.write('hgmodules', _("checking installed modules (%s)...\n"),
1020 1021 os.path.dirname(pycompat.fsencode(__file__)))
1021 1022
1022 1023 err = None
1023 1024 try:
1024 1025 from . import (
1025 1026 base85,
1026 1027 bdiff,
1027 1028 mpatch,
1028 1029 osutil,
1029 1030 )
1030 1031 dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
1031 1032 except Exception as inst:
1032 1033 err = inst
1033 1034 problems += 1
1034 1035 fm.condwrite(err, 'extensionserror', " %s\n", err)
1035 1036
1036 1037 compengines = util.compengines._engines.values()
1037 1038 fm.write('compengines', _('checking registered compression engines (%s)\n'),
1038 1039 fm.formatlist(sorted(e.name() for e in compengines),
1039 1040 name='compengine', fmt='%s', sep=', '))
1040 1041 fm.write('compenginesavail', _('checking available compression engines '
1041 1042 '(%s)\n'),
1042 1043 fm.formatlist(sorted(e.name() for e in compengines
1043 1044 if e.available()),
1044 1045 name='compengine', fmt='%s', sep=', '))
1045 1046 wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
1046 1047 fm.write('compenginesserver', _('checking available compression engines '
1047 1048 'for wire protocol (%s)\n'),
1048 1049 fm.formatlist([e.name() for e in wirecompengines
1049 1050 if e.wireprotosupport()],
1050 1051 name='compengine', fmt='%s', sep=', '))
1051 1052
1052 1053 # templates
1053 1054 p = templater.templatepaths()
1054 1055 fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
1055 1056 fm.condwrite(not p, '', _(" no template directories found\n"))
1056 1057 if p:
1057 1058 m = templater.templatepath("map-cmdline.default")
1058 1059 if m:
1059 1060 # template found, check if it is working
1060 1061 err = None
1061 1062 try:
1062 1063 templater.templater.frommapfile(m)
1063 1064 except Exception as inst:
1064 1065 err = inst
1065 1066 p = None
1066 1067 fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
1067 1068 else:
1068 1069 p = None
1069 1070 fm.condwrite(p, 'defaulttemplate',
1070 1071 _("checking default template (%s)\n"), m)
1071 1072 fm.condwrite(not m, 'defaulttemplatenotfound',
1072 1073 _(" template '%s' not found\n"), "default")
1073 1074 if not p:
1074 1075 problems += 1
1075 1076 fm.condwrite(not p, '',
1076 1077 _(" (templates seem to have been installed incorrectly)\n"))
1077 1078
1078 1079 # editor
1079 1080 editor = ui.geteditor()
1080 1081 editor = util.expandpath(editor)
1081 1082 fm.write('editor', _("checking commit editor... (%s)\n"), editor)
1082 1083 cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
1083 1084 fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
1084 1085 _(" No commit editor set and can't find %s in PATH\n"
1085 1086 " (specify a commit editor in your configuration"
1086 1087 " file)\n"), not cmdpath and editor == 'vi' and editor)
1087 1088 fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
1088 1089 _(" Can't find editor '%s' in PATH\n"
1089 1090 " (specify a commit editor in your configuration"
1090 1091 " file)\n"), not cmdpath and editor)
1091 1092 if not cmdpath and editor != 'vi':
1092 1093 problems += 1
1093 1094
1094 1095 # check username
1095 1096 username = None
1096 1097 err = None
1097 1098 try:
1098 1099 username = ui.username()
1099 1100 except error.Abort as e:
1100 1101 err = e
1101 1102 problems += 1
1102 1103
1103 1104 fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
1104 1105 fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
1105 1106 " (specify a username in your configuration file)\n"), err)
1106 1107
1107 1108 fm.condwrite(not problems, '',
1108 1109 _("no problems detected\n"))
1109 1110 if not problems:
1110 1111 fm.data(problems=problems)
1111 1112 fm.condwrite(problems, 'problems',
1112 1113 _("%d problems detected,"
1113 1114 " please check your install!\n"), problems)
1114 1115 fm.end()
1115 1116
1116 1117 return problems
1117 1118
1118 1119 @command('debugknown', [], _('REPO ID...'), norepo=True)
1119 1120 def debugknown(ui, repopath, *ids, **opts):
1120 1121 """test whether node ids are known to a repo
1121 1122
1122 1123 Every ID must be a full-length hex node id string. Returns a list of 0s
1123 1124 and 1s indicating unknown/known.
1124 1125 """
1125 1126 repo = hg.peer(ui, opts, repopath)
1126 1127 if not repo.capable('known'):
1127 1128 raise error.Abort("known() not supported by target repository")
1128 1129 flags = repo.known([bin(s) for s in ids])
1129 1130 ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
1130 1131
1131 1132 @command('debuglabelcomplete', [], _('LABEL...'))
1132 1133 def debuglabelcomplete(ui, repo, *args):
1133 1134 '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
1134 1135 debugnamecomplete(ui, repo, *args)
1135 1136
1136 1137 @command('debuglocks',
1137 1138 [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
1138 1139 ('W', 'force-wlock', None,
1139 1140 _('free the working state lock (DANGEROUS)'))],
1140 1141 _('[OPTION]...'))
1141 1142 def debuglocks(ui, repo, **opts):
1142 1143 """show or modify state of locks
1143 1144
1144 1145 By default, this command will show which locks are held. This
1145 1146 includes the user and process holding the lock, the amount of time
1146 1147 the lock has been held, and the machine name where the process is
1147 1148 running if it's not local.
1148 1149
1149 1150 Locks protect the integrity of Mercurial's data, so should be
1150 1151 treated with care. System crashes or other interruptions may cause
1151 1152 locks to not be properly released, though Mercurial will usually
1152 1153 detect and remove such stale locks automatically.
1153 1154
1154 1155 However, detecting stale locks may not always be possible (for
1155 1156 instance, on a shared filesystem). Removing locks may also be
1156 1157 blocked by filesystem permissions.
1157 1158
1158 1159 Returns 0 if no locks are held.
1159 1160
1160 1161 """
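    # Editor's illustration (not part of this changeset): per the report()
    # helper below, output for a held store lock and a free wlock looks
    # roughly like
    #
    #   lock:  user alice, process 4242, host builder (17s)
    #   wlock: free
    #
    # (user, pid, host name and age are hypothetical)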
1161 1162
1162 1163 if opts.get('force_lock'):
1163 1164 repo.svfs.unlink('lock')
1164 1165 if opts.get('force_wlock'):
1165 1166 repo.vfs.unlink('wlock')
1166 1167     if opts.get('force_lock') or opts.get('force_wlock'):
1167 1168 return 0
1168 1169
1169 1170 now = time.time()
1170 1171 held = 0
1171 1172
1172 1173 def report(vfs, name, method):
1173 1174 # this causes stale locks to get reaped for more accurate reporting
1174 1175 try:
1175 1176 l = method(False)
1176 1177 except error.LockHeld:
1177 1178 l = None
1178 1179
1179 1180 if l:
1180 1181 l.release()
1181 1182 else:
1182 1183 try:
1183 1184 stat = vfs.lstat(name)
1184 1185 age = now - stat.st_mtime
1185 1186 user = util.username(stat.st_uid)
1186 1187 locker = vfs.readlock(name)
1187 1188 if ":" in locker:
1188 1189 host, pid = locker.split(':')
1189 1190 if host == socket.gethostname():
1190 1191 locker = 'user %s, process %s' % (user, pid)
1191 1192 else:
1192 1193 locker = 'user %s, process %s, host %s' \
1193 1194 % (user, pid, host)
1194 1195 ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
1195 1196 return 1
1196 1197 except OSError as e:
1197 1198 if e.errno != errno.ENOENT:
1198 1199 raise
1199 1200
1200 1201 ui.write(("%-6s free\n") % (name + ":"))
1201 1202 return 0
1202 1203
1203 1204 held += report(repo.svfs, "lock", repo.lock)
1204 1205 held += report(repo.vfs, "wlock", repo.wlock)
1205 1206
1206 1207 return held
1207 1208
1208 1209 @command('debugmergestate', [], '')
1209 1210 def debugmergestate(ui, repo, *args):
1210 1211 """print merge state
1211 1212
1212 1213 Use --verbose to print out information about whether v1 or v2 merge state
1213 1214 was chosen."""
1214 1215 def _hashornull(h):
1215 1216 if h == nullhex:
1216 1217 return 'null'
1217 1218 else:
1218 1219 return h
1219 1220
1220 1221 def printrecords(version):
1221 1222 ui.write(('* version %s records\n') % version)
1222 1223 if version == 1:
1223 1224 records = v1records
1224 1225 else:
1225 1226 records = v2records
1226 1227
1227 1228 for rtype, record in records:
1228 1229 # pretty print some record types
1229 1230 if rtype == 'L':
1230 1231 ui.write(('local: %s\n') % record)
1231 1232 elif rtype == 'O':
1232 1233 ui.write(('other: %s\n') % record)
1233 1234 elif rtype == 'm':
1234 1235 driver, mdstate = record.split('\0', 1)
1235 1236 ui.write(('merge driver: %s (state "%s")\n')
1236 1237 % (driver, mdstate))
1237 1238 elif rtype in 'FDC':
1238 1239 r = record.split('\0')
1239 1240 f, state, hash, lfile, afile, anode, ofile = r[0:7]
1240 1241 if version == 1:
1241 1242 onode = 'not stored in v1 format'
1242 1243 flags = r[7]
1243 1244 else:
1244 1245 onode, flags = r[7:9]
1245 1246 ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
1246 1247 % (f, rtype, state, _hashornull(hash)))
1247 1248 ui.write((' local path: %s (flags "%s")\n') % (lfile, flags))
1248 1249 ui.write((' ancestor path: %s (node %s)\n')
1249 1250 % (afile, _hashornull(anode)))
1250 1251 ui.write((' other path: %s (node %s)\n')
1251 1252 % (ofile, _hashornull(onode)))
1252 1253 elif rtype == 'f':
1253 1254 filename, rawextras = record.split('\0', 1)
1254 1255 extras = rawextras.split('\0')
1255 1256 i = 0
1256 1257 extrastrings = []
1257 1258 while i < len(extras):
1258 1259 extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
1259 1260 i += 2
1260 1261
1261 1262 ui.write(('file extras: %s (%s)\n')
1262 1263 % (filename, ', '.join(extrastrings)))
1263 1264 elif rtype == 'l':
1264 1265 labels = record.split('\0', 2)
1265 1266 labels = [l for l in labels if len(l) > 0]
1266 1267 ui.write(('labels:\n'))
1267 1268 ui.write((' local: %s\n' % labels[0]))
1268 1269 ui.write((' other: %s\n' % labels[1]))
1269 1270 if len(labels) > 2:
1270 1271 ui.write((' base: %s\n' % labels[2]))
1271 1272 else:
1272 1273 ui.write(('unrecognized entry: %s\t%s\n')
1273 1274 % (rtype, record.replace('\0', '\t')))
1274 1275
1275 1276 # Avoid mergestate.read() since it may raise an exception for unsupported
1276 1277 # merge state records. We shouldn't be doing this, but this is OK since this
1277 1278 # command is pretty low-level.
1278 1279 ms = mergemod.mergestate(repo)
1279 1280
1280 1281 # sort so that reasonable information is on top
1281 1282 v1records = ms._readrecordsv1()
1282 1283 v2records = ms._readrecordsv2()
1283 1284 order = 'LOml'
1284 1285 def key(r):
1285 1286 idx = order.find(r[0])
1286 1287 if idx == -1:
1287 1288 return (1, r[1])
1288 1289 else:
1289 1290 return (0, idx)
1290 1291 v1records.sort(key=key)
1291 1292 v2records.sort(key=key)
1292 1293
1293 1294 if not v1records and not v2records:
1294 1295 ui.write(('no merge state found\n'))
1295 1296 elif not v2records:
1296 1297 ui.note(('no version 2 merge state\n'))
1297 1298 printrecords(1)
1298 1299 elif ms._v1v2match(v1records, v2records):
1299 1300 ui.note(('v1 and v2 states match: using v2\n'))
1300 1301 printrecords(2)
1301 1302 else:
1302 1303 ui.note(('v1 and v2 states mismatch: using v1\n'))
1303 1304 printrecords(1)
1304 1305 if ui.verbose:
1305 1306 printrecords(2)
1306 1307
1307 1308 @command('debugnamecomplete', [], _('NAME...'))
1308 1309 def debugnamecomplete(ui, repo, *args):
1309 1310 '''complete "names" - tags, open branch names, bookmark names'''
1310 1311
1311 1312 names = set()
1312 1313 # since we previously only listed open branches, we will handle that
1313 1314 # specially (after this for loop)
1314 1315 for name, ns in repo.names.iteritems():
1315 1316 if name != 'branches':
1316 1317 names.update(ns.listnames(repo))
1317 1318 names.update(tag for (tag, heads, tip, closed)
1318 1319 in repo.branchmap().iterbranches() if not closed)
1319 1320 completions = set()
1320 1321 if not args:
1321 1322 args = ['']
1322 1323 for a in args:
1323 1324 completions.update(n for n in names if n.startswith(a))
1324 1325 ui.write('\n'.join(sorted(completions)))
1325 1326 ui.write('\n')
1326 1327
1327 1328 @command('debugobsolete',
1328 1329 [('', 'flags', 0, _('markers flag')),
1329 1330 ('', 'record-parents', False,
1330 1331 _('record parent information for the precursor')),
1331 1332 ('r', 'rev', [], _('display markers relevant to REV')),
1332 1333 ('', 'index', False, _('display index of the marker')),
1333 1334 ('', 'delete', [], _('delete markers specified by indices')),
1334 1335 ] + commands.commitopts2 + commands.formatteropts,
1335 1336 _('[OBSOLETED [REPLACEMENT ...]]'))
1336 1337 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
1337 1338 """create arbitrary obsolete marker
1338 1339
1339 1340 With no arguments, displays the list of obsolescence markers."""
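    # Editor's illustration (not part of this changeset): creating a marker
    # requires full 40-character hex node ids, e.g.
    #
    #   hg debugobsolete 5c095ad7e90f871700f02dd1fa5012cb4498a2d4 \
    #       ca819180edb99ed25ceafb3e9584ac287e240b00
    #
    # (both node ids are hypothetical placeholders)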
1340 1341
1341 1342 def parsenodeid(s):
1342 1343 try:
1343 1344 # We do not use revsingle/revrange functions here to accept
1344 1345 # arbitrary node identifiers, possibly not present in the
1345 1346 # local repository.
1346 1347 n = bin(s)
1347 1348 if len(n) != len(nullid):
1348 1349 raise TypeError()
1349 1350 return n
1350 1351 except TypeError:
1351 1352 raise error.Abort('changeset references must be full hexadecimal '
1352 1353 'node identifiers')
1353 1354
1354 1355 if opts.get('delete'):
1355 1356 indices = []
1356 1357 for v in opts.get('delete'):
1357 1358 try:
1358 1359 indices.append(int(v))
1359 1360 except ValueError:
1360 1361 raise error.Abort(_('invalid index value: %r') % v,
1361 1362 hint=_('use integers for indices'))
1362 1363
1363 1364 if repo.currenttransaction():
1364 1365 raise error.Abort(_('cannot delete obsmarkers in the middle '
1365 1366                                 'of a transaction.'))
1366 1367
1367 1368 with repo.lock():
1368 1369 n = repair.deleteobsmarkers(repo.obsstore, indices)
1369 1370 ui.write(_('deleted %i obsolescence markers\n') % n)
1370 1371
1371 1372 return
1372 1373
1373 1374 if precursor is not None:
1374 1375 if opts['rev']:
1375 1376 raise error.Abort('cannot select revision when creating marker')
1376 1377 metadata = {}
1377 1378 metadata['user'] = opts['user'] or ui.username()
1378 1379 succs = tuple(parsenodeid(succ) for succ in successors)
1379 1380 l = repo.lock()
1380 1381 try:
1381 1382 tr = repo.transaction('debugobsolete')
1382 1383 try:
1383 1384 date = opts.get('date')
1384 1385 if date:
1385 1386 date = util.parsedate(date)
1386 1387 else:
1387 1388 date = None
1388 1389 prec = parsenodeid(precursor)
1389 1390 parents = None
1390 1391 if opts['record_parents']:
1391 1392 if prec not in repo.unfiltered():
1392 1393                     raise error.Abort('cannot use --record-parents on '
1393 1394 'unknown changesets')
1394 1395 parents = repo.unfiltered()[prec].parents()
1395 1396 parents = tuple(p.node() for p in parents)
1396 1397 repo.obsstore.create(tr, prec, succs, opts['flags'],
1397 1398 parents=parents, date=date,
1398 1399 metadata=metadata)
1399 1400 tr.close()
1400 1401 except ValueError as exc:
1401 1402 raise error.Abort(_('bad obsmarker input: %s') % exc)
1402 1403 finally:
1403 1404 tr.release()
1404 1405 finally:
1405 1406 l.release()
1406 1407 else:
1407 1408 if opts['rev']:
1408 1409 revs = scmutil.revrange(repo, opts['rev'])
1409 1410 nodes = [repo[r].node() for r in revs]
1410 1411 markers = list(obsolete.getmarkers(repo, nodes=nodes))
1411 1412 markers.sort(key=lambda x: x._data)
1412 1413 else:
1413 1414 markers = obsolete.getmarkers(repo)
1414 1415
1415 1416 markerstoiter = markers
1416 1417 isrelevant = lambda m: True
1417 1418 if opts.get('rev') and opts.get('index'):
1418 1419 markerstoiter = obsolete.getmarkers(repo)
1419 1420 markerset = set(markers)
1420 1421 isrelevant = lambda m: m in markerset
1421 1422
1422 1423 fm = ui.formatter('debugobsolete', opts)
1423 1424 for i, m in enumerate(markerstoiter):
1424 1425 if not isrelevant(m):
1425 1426 # marker can be irrelevant when we're iterating over a set
1426 1427 # of markers (markerstoiter) which is bigger than the set
1427 1428 # of markers we want to display (markers)
1428 1429 # this can happen if both --index and --rev options are
1429 1430 # provided and thus we need to iterate over all of the markers
1430 1431 # to get the correct indices, but only display the ones that
1431 1432 # are relevant to --rev value
1432 1433 continue
1433 1434 fm.startitem()
1434 1435 ind = i if opts.get('index') else None
1435 1436 cmdutil.showmarker(fm, m, index=ind)
1436 1437 fm.end()
1437 1438
1438 1439 @command('debugpathcomplete',
1439 1440 [('f', 'full', None, _('complete an entire path')),
1440 1441 ('n', 'normal', None, _('show only normal files')),
1441 1442 ('a', 'added', None, _('show only added files')),
1442 1443 ('r', 'removed', None, _('show only removed files'))],
1443 1444 _('FILESPEC...'))
1444 1445 def debugpathcomplete(ui, repo, *specs, **opts):
1445 1446 '''complete part or all of a tracked path
1446 1447
1447 1448 This command supports shells that offer path name completion. It
1448 1449 currently completes only files already known to the dirstate.
1449 1450
1450 1451 Completion extends only to the next path segment unless
1451 1452 --full is specified, in which case entire paths are used.'''
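    # Editor's illustration (not part of this changeset): a shell completer
    # might call
    #
    #   hg debugpathcomplete -n src/
    #
    # to list the next path segment of tracked files under "src/"
    # ("src/" is a hypothetical directory).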
1452 1453
1453 1454 def complete(path, acceptable):
1454 1455 dirstate = repo.dirstate
1455 1456 spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
1456 1457 rootdir = repo.root + pycompat.ossep
1457 1458 if spec != repo.root and not spec.startswith(rootdir):
1458 1459 return [], []
1459 1460 if os.path.isdir(spec):
1460 1461 spec += '/'
1461 1462 spec = spec[len(rootdir):]
1462 1463 fixpaths = pycompat.ossep != '/'
1463 1464 if fixpaths:
1464 1465 spec = spec.replace(pycompat.ossep, '/')
1465 1466 speclen = len(spec)
1466 1467 fullpaths = opts['full']
1467 1468 files, dirs = set(), set()
1468 1469 adddir, addfile = dirs.add, files.add
1469 1470 for f, st in dirstate.iteritems():
1470 1471 if f.startswith(spec) and st[0] in acceptable:
1471 1472 if fixpaths:
1472 1473 f = f.replace('/', pycompat.ossep)
1473 1474 if fullpaths:
1474 1475 addfile(f)
1475 1476 continue
1476 1477 s = f.find(pycompat.ossep, speclen)
1477 1478 if s >= 0:
1478 1479 adddir(f[:s])
1479 1480 else:
1480 1481 addfile(f)
1481 1482 return files, dirs
1482 1483
1483 1484 acceptable = ''
1484 1485 if opts['normal']:
1485 1486 acceptable += 'nm'
1486 1487 if opts['added']:
1487 1488 acceptable += 'a'
1488 1489 if opts['removed']:
1489 1490 acceptable += 'r'
1490 1491 cwd = repo.getcwd()
1491 1492 if not specs:
1492 1493 specs = ['.']
1493 1494
1494 1495 files, dirs = set(), set()
1495 1496 for spec in specs:
1496 1497 f, d = complete(spec, acceptable or 'nmar')
1497 1498 files.update(f)
1498 1499 dirs.update(d)
1499 1500 files.update(dirs)
1500 1501 ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
1501 1502 ui.write('\n')
1502 1503
1503 1504 @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
1504 1505 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
1505 1506 '''access the pushkey key/value protocol
1506 1507
1507 1508 With two args, list the keys in the given namespace.
1508 1509
1509 1510 With five args, set a key to new if it currently is set to old.
1510 1511 Reports success or failure.
1511 1512 '''
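    # Editor's illustration (not part of this changeset): the two-argument
    # form lists a namespace; the five-argument form updates a key, e.g.
    #
    #   hg debugpushkey ../other bookmarks
    #   hg debugpushkey ../other bookmarks mybook '' c3f1ca2924c16a19b0656a84900e504e5b0aec2d
    #
    # ("../other", "mybook" and the node id are hypothetical; an empty old
    # value means the key is expected to be unset)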
1512 1513
1513 1514 target = hg.peer(ui, {}, repopath)
1514 1515 if keyinfo:
1515 1516 key, old, new = keyinfo
1516 1517 r = target.pushkey(namespace, key, old, new)
1517 1518 ui.status(str(r) + '\n')
1518 1519 return not r
1519 1520 else:
1520 1521 for k, v in sorted(target.listkeys(namespace).iteritems()):
1521 1522 ui.write("%s\t%s\n" % (util.escapestr(k),
1522 1523 util.escapestr(v)))
1523 1524
1524 1525 @command('debugpvec', [], _('A B'))
1525 1526 def debugpvec(ui, repo, a, b=None):
1526 1527 ca = scmutil.revsingle(repo, a)
1527 1528 cb = scmutil.revsingle(repo, b)
1528 1529 pa = pvec.ctxpvec(ca)
1529 1530 pb = pvec.ctxpvec(cb)
1530 1531 if pa == pb:
1531 1532 rel = "="
1532 1533 elif pa > pb:
1533 1534 rel = ">"
1534 1535 elif pa < pb:
1535 1536 rel = "<"
1536 1537 elif pa | pb:
1537 1538 rel = "|"
1538 1539 ui.write(_("a: %s\n") % pa)
1539 1540 ui.write(_("b: %s\n") % pb)
1540 1541 ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
1541 1542 ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
1542 1543 (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
1543 1544 pa.distance(pb), rel))
1544 1545
1545 1546 @command('debugrebuilddirstate|debugrebuildstate',
1546 1547 [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
1547 1548 ('', 'minimal', None, _('only rebuild files that are inconsistent with '
1548 1549 'the working copy parent')),
1549 1550 ],
1550 1551 _('[-r REV]'))
1551 1552 def debugrebuilddirstate(ui, repo, rev, **opts):
1552 1553 """rebuild the dirstate as it would look like for the given revision
1553 1554
1554 1555 If no revision is specified, the first current parent will be used.
1555 1556
1556 1557 The dirstate will be set to the files of the given revision.
1557 1558 The actual working directory content or existing dirstate
1558 1559 information such as adds or removes is not considered.
1559 1560
1560 1561 ``minimal`` will only rebuild the dirstate status for files that claim to be
1561 1562 tracked but are not in the parent manifest, or that exist in the parent
1562 1563 manifest but are not in the dirstate. It will not change adds, removes, or
1563 1564 modified files that are in the working copy parent.
1564 1565
1565 1566 One use of this command is to make the next :hg:`status` invocation
1566 1567 check the actual file content.
1567 1568 """
1568 1569 ctx = scmutil.revsingle(repo, rev)
1569 1570 with repo.wlock():
1570 1571 dirstate = repo.dirstate
1571 1572 changedfiles = None
1572 1573 # See command doc for what minimal does.
1573 1574 if opts.get('minimal'):
1574 1575 manifestfiles = set(ctx.manifest().keys())
1575 1576 dirstatefiles = set(dirstate)
1576 1577 manifestonly = manifestfiles - dirstatefiles
1577 1578 dsonly = dirstatefiles - manifestfiles
1578 1579 dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
1579 1580 changedfiles = manifestonly | dsnotadded
1580 1581
1581 1582 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
1582 1583
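The ``--minimal`` selection above reduces to a few set operations; a worked example with stand-in file names:

manifestfiles = {'a.txt', 'b.txt'}            # files in parent manifest
dirstatefiles = {'b.txt', 'c.txt', 'd.txt'}   # files in dirstate
status = {'b.txt': 'n', 'c.txt': 'a', 'd.txt': 'n'}

manifestonly = manifestfiles - dirstatefiles             # {'a.txt'}
dsonly = dirstatefiles - manifestfiles                   # {'c.txt', 'd.txt'}
dsnotadded = set(f for f in dsonly if status[f] != 'a')  # {'d.txt'}
changedfiles = manifestonly | dsnotadded                 # entries rebuilt
print(sorted(changedfiles))                              # ['a.txt', 'd.txt']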
1583 1584 @command('debugrebuildfncache', [], '')
1584 1585 def debugrebuildfncache(ui, repo):
1585 1586 """rebuild the fncache file"""
1586 1587 repair.rebuildfncache(ui, repo)
1587 1588
1588 1589 @command('debugrename',
1589 1590 [('r', 'rev', '', _('revision to debug'), _('REV'))],
1590 1591 _('[-r REV] FILE'))
1591 1592 def debugrename(ui, repo, file1, *pats, **opts):
1592 1593 """dump rename information"""
1593 1594
1594 1595 ctx = scmutil.revsingle(repo, opts.get('rev'))
1595 1596 m = scmutil.match(ctx, (file1,) + pats, opts)
1596 1597 for abs in ctx.walk(m):
1597 1598 fctx = ctx[abs]
1598 1599 o = fctx.filelog().renamed(fctx.filenode())
1599 1600 rel = m.rel(abs)
1600 1601 if o:
1601 1602 ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
1602 1603 else:
1603 1604 ui.write(_("%s not renamed\n") % rel)
1604 1605
1605 1606 @command('debugrevlog', commands.debugrevlogopts +
1606 1607 [('d', 'dump', False, _('dump index data'))],
1607 1608 _('-c|-m|FILE'),
1608 1609 optionalrepo=True)
1609 1610 def debugrevlog(ui, repo, file_=None, **opts):
1610 1611 """show data and statistics about a revlog"""
1611 1612 r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
1612 1613
1613 1614 if opts.get("dump"):
1614 1615 numrevs = len(r)
1615 1616 ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
1616 1617 " rawsize totalsize compression heads chainlen\n"))
1617 1618 ts = 0
1618 1619 heads = set()
1619 1620
1620 1621 for rev in xrange(numrevs):
1621 1622 dbase = r.deltaparent(rev)
1622 1623 if dbase == -1:
1623 1624 dbase = rev
1624 1625 cbase = r.chainbase(rev)
1625 1626 clen = r.chainlen(rev)
1626 1627 p1, p2 = r.parentrevs(rev)
1627 1628 rs = r.rawsize(rev)
1628 1629 ts = ts + rs
1629 1630 heads -= set(r.parentrevs(rev))
1630 1631 heads.add(rev)
1631 1632 try:
1632 1633 compression = ts / r.end(rev)
1633 1634 except ZeroDivisionError:
1634 1635 compression = 0
1635 1636 ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
1636 1637 "%11d %5d %8d\n" %
1637 1638 (rev, p1, p2, r.start(rev), r.end(rev),
1638 1639 r.start(dbase), r.start(cbase),
1639 1640 r.start(p1), r.start(p2),
1640 1641 rs, ts, compression, len(heads), clen))
1641 1642 return 0
1642 1643
1643 1644 v = r.version
1644 1645 format = v & 0xFFFF
1645 1646 flags = []
1646 1647 gdelta = False
1647 1648 if v & revlog.REVLOGNGINLINEDATA:
1648 1649 flags.append('inline')
1649 1650 if v & revlog.REVLOGGENERALDELTA:
1650 1651 gdelta = True
1651 1652 flags.append('generaldelta')
1652 1653 if not flags:
1653 1654 flags = ['(none)']
1654 1655
1655 1656 nummerges = 0
1656 1657 numfull = 0
1657 1658 numprev = 0
1658 1659 nump1 = 0
1659 1660 nump2 = 0
1660 1661 numother = 0
1661 1662 nump1prev = 0
1662 1663 nump2prev = 0
1663 1664 chainlengths = []
1664 1665
1665 1666 datasize = [None, 0, 0]
1666 1667 fullsize = [None, 0, 0]
1667 1668 deltasize = [None, 0, 0]
1668 1669 chunktypecounts = {}
1669 1670 chunktypesizes = {}
1670 1671
1671 1672 def addsize(size, l):
1672 1673 if l[0] is None or size < l[0]:
1673 1674 l[0] = size
1674 1675 if size > l[1]:
1675 1676 l[1] = size
1676 1677 l[2] += size
1677 1678
1678 1679 numrevs = len(r)
1679 1680 for rev in xrange(numrevs):
1680 1681 p1, p2 = r.parentrevs(rev)
1681 1682 delta = r.deltaparent(rev)
1682 1683 if format > 0:
1683 1684 addsize(r.rawsize(rev), datasize)
1684 1685 if p2 != nullrev:
1685 1686 nummerges += 1
1686 1687 size = r.length(rev)
1687 1688 if delta == nullrev:
1688 1689 chainlengths.append(0)
1689 1690 numfull += 1
1690 1691 addsize(size, fullsize)
1691 1692 else:
1692 1693 chainlengths.append(chainlengths[delta] + 1)
1693 1694 addsize(size, deltasize)
1694 1695 if delta == rev - 1:
1695 1696 numprev += 1
1696 1697 if delta == p1:
1697 1698 nump1prev += 1
1698 1699 elif delta == p2:
1699 1700 nump2prev += 1
1700 1701 elif delta == p1:
1701 1702 nump1 += 1
1702 1703 elif delta == p2:
1703 1704 nump2 += 1
1704 1705 elif delta != nullrev:
1705 1706 numother += 1
1706 1707
1707 1708 # Obtain data on the raw chunks in the revlog.
1708 1709 chunk = r._chunkraw(rev, rev)[1]
1709 1710 if chunk:
1710 1711 chunktype = chunk[0]
1711 1712 else:
1712 1713 chunktype = 'empty'
1713 1714
1714 1715 if chunktype not in chunktypecounts:
1715 1716 chunktypecounts[chunktype] = 0
1716 1717 chunktypesizes[chunktype] = 0
1717 1718
1718 1719 chunktypecounts[chunktype] += 1
1719 1720 chunktypesizes[chunktype] += size
1720 1721
1721 1722 # Adjust size min value for empty cases
1722 1723 for size in (datasize, fullsize, deltasize):
1723 1724 if size[0] is None:
1724 1725 size[0] = 0
1725 1726
1726 1727 numdeltas = numrevs - numfull
1727 1728 numoprev = numprev - nump1prev - nump2prev
1728 1729 totalrawsize = datasize[2]
1729 1730 datasize[2] /= numrevs
1730 1731 fulltotal = fullsize[2]
1731 1732 fullsize[2] /= numfull
1732 1733 deltatotal = deltasize[2]
1733 1734 if numrevs - numfull > 0:
1734 1735 deltasize[2] /= numrevs - numfull
1735 1736 totalsize = fulltotal + deltatotal
1736 1737 avgchainlen = sum(chainlengths) / numrevs
1737 1738 maxchainlen = max(chainlengths)
1738 1739 compratio = 1
1739 1740 if totalsize:
1740 1741 compratio = totalrawsize / totalsize
1741 1742
1742 1743 basedfmtstr = '%%%dd\n'
1743 1744 basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
1744 1745
1745 1746 def dfmtstr(max):
1746 1747 return basedfmtstr % len(str(max))
1747 1748 def pcfmtstr(max, padding=0):
1748 1749 return basepcfmtstr % (len(str(max)), ' ' * padding)
1749 1750
1750 1751 def pcfmt(value, total):
1751 1752 if total:
1752 1753 return (value, 100 * float(value) / total)
1753 1754 else:
1754 1755 return value, 100.0
1755 1756
1756 1757 ui.write(('format : %d\n') % format)
1757 1758 ui.write(('flags : %s\n') % ', '.join(flags))
1758 1759
1759 1760 ui.write('\n')
1760 1761 fmt = pcfmtstr(totalsize)
1761 1762 fmt2 = dfmtstr(totalsize)
1762 1763 ui.write(('revisions : ') + fmt2 % numrevs)
1763 1764 ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
1764 1765 ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
1765 1766 ui.write(('revisions : ') + fmt2 % numrevs)
1766 1767 ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
1767 1768 ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
1768 1769 ui.write(('revision size : ') + fmt2 % totalsize)
1769 1770 ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
1770 1771 ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))
1771 1772
1772 1773 def fmtchunktype(chunktype):
1773 1774 if chunktype == 'empty':
1774 1775 return ' %s : ' % chunktype
1775 1776 elif chunktype in string.ascii_letters:
1776 1777 return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
1777 1778 else:
1778 1779 return ' 0x%s : ' % hex(chunktype)
1779 1780
1780 1781 ui.write('\n')
1781 1782 ui.write(('chunks : ') + fmt2 % numrevs)
1782 1783 for chunktype in sorted(chunktypecounts):
1783 1784 ui.write(fmtchunktype(chunktype))
1784 1785 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
1785 1786 ui.write(('chunks size : ') + fmt2 % totalsize)
1786 1787 for chunktype in sorted(chunktypecounts):
1787 1788 ui.write(fmtchunktype(chunktype))
1788 1789 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
1789 1790
1790 1791 ui.write('\n')
1791 1792 fmt = dfmtstr(max(avgchainlen, compratio))
1792 1793 ui.write(('avg chain length : ') + fmt % avgchainlen)
1793 1794 ui.write(('max chain length : ') + fmt % maxchainlen)
1794 1795 ui.write(('compression ratio : ') + fmt % compratio)
1795 1796
1796 1797 if format > 0:
1797 1798 ui.write('\n')
1798 1799 ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
1799 1800 % tuple(datasize))
1800 1801 ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
1801 1802 % tuple(fullsize))
1802 1803 ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
1803 1804 % tuple(deltasize))
1804 1805
1805 1806 if numdeltas > 0:
1806 1807 ui.write('\n')
1807 1808 fmt = pcfmtstr(numdeltas)
1808 1809 fmt2 = pcfmtstr(numdeltas, 4)
1809 1810 ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
1810 1811 if numprev > 0:
1811 1812 ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
1812 1813 numprev))
1813 1814 ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
1814 1815 numprev))
1815 1816 ui.write((' other : ') + fmt2 % pcfmt(numoprev,
1816 1817 numprev))
1817 1818 if gdelta:
1818 1819 ui.write(('deltas against p1 : ')
1819 1820 + fmt % pcfmt(nump1, numdeltas))
1820 1821 ui.write(('deltas against p2 : ')
1821 1822 + fmt % pcfmt(nump2, numdeltas))
1822 1823 ui.write(('deltas against other : ') + fmt % pcfmt(numother,
1823 1824 numdeltas))
1824 1825
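The chain-length bookkeeping above only needs each revision's delta parent: a full snapshot resets the chain, a delta extends its base's chain by one. A toy model (deltaparents stands in for r.deltaparent):

nullrev = -1
deltaparents = [-1, 0, 1, -1, 3]   # -1 marks a full snapshot
chainlengths = []
for rev in range(len(deltaparents)):
    delta = deltaparents[rev]
    if delta == nullrev:
        chainlengths.append(0)
    else:
        chainlengths.append(chainlengths[delta] + 1)
print(chainlengths)       # [0, 1, 2, 0, 1]
print(max(chainlengths))  # the 'max chain length' statistic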
1825 1826 @command('debugrevspec',
1826 1827 [('', 'optimize', None,
1827 1828 _('print parsed tree after optimizing (DEPRECATED)')),
1828 1829 ('p', 'show-stage', [],
1829 1830 _('print parsed tree at the given stage'), _('NAME')),
1830 1831 ('', 'no-optimized', False, _('evaluate tree without optimization')),
1831 1832 ('', 'verify-optimized', False, _('verify optimized result')),
1832 1833 ],
1833 1834 ('REVSPEC'))
1834 1835 def debugrevspec(ui, repo, expr, **opts):
1835 1836 """parse and apply a revision specification
1836 1837
1837 1838 Use the -p/--show-stage option to print the parsed tree at the given stages.
1838 1839 Use -p all to print the tree at every stage.
1839 1840
1840 1841 Use --verify-optimized to compare the optimized result with the unoptimized
1841 1842 one. Returns 1 if the optimized result differs.
1842 1843 """
1843 1844 stages = [
1844 1845 ('parsed', lambda tree: tree),
1845 1846 ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)),
1846 1847 ('concatenated', revsetlang.foldconcat),
1847 1848 ('analyzed', revsetlang.analyze),
1848 1849 ('optimized', revsetlang.optimize),
1849 1850 ]
1850 1851 if opts['no_optimized']:
1851 1852 stages = stages[:-1]
1852 1853 if opts['verify_optimized'] and opts['no_optimized']:
1853 1854 raise error.Abort(_('cannot use --verify-optimized with '
1854 1855 '--no-optimized'))
1855 1856 stagenames = set(n for n, f in stages)
1856 1857
1857 1858 showalways = set()
1858 1859 showchanged = set()
1859 1860 if ui.verbose and not opts['show_stage']:
1860 1861 # show parsed tree by --verbose (deprecated)
1861 1862 showalways.add('parsed')
1862 1863 showchanged.update(['expanded', 'concatenated'])
1863 1864 if opts['optimize']:
1864 1865 showalways.add('optimized')
1865 1866 if opts['show_stage'] and opts['optimize']:
1866 1867 raise error.Abort(_('cannot use --optimize with --show-stage'))
1867 1868 if opts['show_stage'] == ['all']:
1868 1869 showalways.update(stagenames)
1869 1870 else:
1870 1871 for n in opts['show_stage']:
1871 1872 if n not in stagenames:
1872 1873 raise error.Abort(_('invalid stage name: %s') % n)
1873 1874 showalways.update(opts['show_stage'])
1874 1875
1875 1876 treebystage = {}
1876 1877 printedtree = None
1877 1878 tree = revsetlang.parse(expr, lookup=repo.__contains__)
1878 1879 for n, f in stages:
1879 1880 treebystage[n] = tree = f(tree)
1880 1881 if n in showalways or (n in showchanged and tree != printedtree):
1881 1882 if opts['show_stage'] or n != 'parsed':
1882 1883 ui.write(("* %s:\n") % n)
1883 1884 ui.write(revsetlang.prettyformat(tree), "\n")
1884 1885 printedtree = tree
1885 1886
1886 1887 if opts['verify_optimized']:
1887 1888 arevs = revset.makematcher(treebystage['analyzed'])(repo)
1888 1889 brevs = revset.makematcher(treebystage['optimized'])(repo)
1889 1890 if ui.verbose:
1890 1891 ui.note(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
1891 1892 ui.note(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
1892 1893 arevs = list(arevs)
1893 1894 brevs = list(brevs)
1894 1895 if arevs == brevs:
1895 1896 return 0
1896 1897 ui.write(('--- analyzed\n'), label='diff.file_a')
1897 1898 ui.write(('+++ optimized\n'), label='diff.file_b')
1898 1899 sm = difflib.SequenceMatcher(None, arevs, brevs)
1899 1900 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
1900 1901 if tag in ('delete', 'replace'):
1901 1902 for c in arevs[alo:ahi]:
1902 1903 ui.write('-%s\n' % c, label='diff.deleted')
1903 1904 if tag in ('insert', 'replace'):
1904 1905 for c in brevs[blo:bhi]:
1905 1906 ui.write('+%s\n' % c, label='diff.inserted')
1906 1907 if tag == 'equal':
1907 1908 for c in arevs[alo:ahi]:
1908 1909 ui.write(' %s\n' % c)
1909 1910 return 1
1910 1911
1911 1912 func = revset.makematcher(tree)
1912 1913 revs = func(repo)
1913 1914 if ui.verbose:
1914 1915 ui.note(("* set:\n"), smartset.prettyformat(revs), "\n")
1915 1916 for c in revs:
1916 1917 ui.write("%s\n" % c)
1917 1918
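The --verify-optimized output is a plain difflib comparison of the two evaluated revision lists; the same loop works standalone:

import difflib

arevs = [0, 1, 2, 4]   # e.g. revisions from the analyzed tree
brevs = [0, 2, 3, 4]   # e.g. revisions from the optimized tree
sm = difflib.SequenceMatcher(None, arevs, brevs)
for tag, alo, ahi, blo, bhi in sm.get_opcodes():
    if tag in ('delete', 'replace'):
        for c in arevs[alo:ahi]:
            print('-%s' % c)
    if tag in ('insert', 'replace'):
        for c in brevs[blo:bhi]:
            print('+%s' % c)
    if tag == 'equal':
        for c in arevs[alo:ahi]:
            print(' %s' % c)
# prints: ' 0', '-1', ' 2', '+3', ' 4'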
1918 1919 @command('debugsetparents', [], _('REV1 [REV2]'))
1919 1920 def debugsetparents(ui, repo, rev1, rev2=None):
1920 1921 """manually set the parents of the current working directory
1921 1922
1922 1923 This is useful for writing repository conversion tools, but should
1923 1924 be used with care. For example, neither the working directory nor the
1924 1925 dirstate is updated, so file status may be incorrect after running this
1925 1926 command.
1926 1927
1927 1928 Returns 0 on success.
1928 1929 """
1929 1930
1930 1931 r1 = scmutil.revsingle(repo, rev1).node()
1931 1932 r2 = scmutil.revsingle(repo, rev2, 'null').node()
1932 1933
1933 1934 with repo.wlock():
1934 1935 repo.setparents(r1, r2)
1935 1936
1936 1937 @command('debugsub',
1937 1938 [('r', 'rev', '',
1938 1939 _('revision to check'), _('REV'))],
1939 1940 _('[-r REV] [REV]'))
1940 1941 def debugsub(ui, repo, rev=None):
1941 1942 ctx = scmutil.revsingle(repo, rev, None)
1942 1943 for k, v in sorted(ctx.substate.items()):
1943 1944 ui.write(('path %s\n') % k)
1944 1945 ui.write((' source %s\n') % v[0])
1945 1946 ui.write((' revision %s\n') % v[1])
1946 1947
1947 1948 @command('debugsuccessorssets',
1948 1949 [],
1949 1950 _('[REV]'))
1950 1951 def debugsuccessorssets(ui, repo, *revs):
1951 1952 """show set of successors for revision
1952 1953
1953 1954 A successors set of changeset A is a consistent group of revisions that
1954 1955 succeed A. It contains non-obsolete changesets only.
1955 1956
1956 1957 In most cases a changeset A has a single successors set containing a single
1957 1958 successor (changeset A replaced by A').
1958 1959
1959 1960 A changeset that is made obsolete with no successors is called "pruned".
1960 1961 Such changesets have no successors sets at all.
1961 1962
1962 1963 A changeset that has been "split" will have a successors set containing
1963 1964 more than one successor.
1964 1965
1965 1966 A changeset that has been rewritten in multiple different ways is called
1966 1967 "divergent". Such changesets have multiple successor sets (each of which
1967 1968 may also be split, i.e. have multiple successors).
1968 1969
1969 1970 Results are displayed as follows::
1970 1971
1971 1972 <rev1>
1972 1973 <successors-1A>
1973 1974 <rev2>
1974 1975 <successors-2A>
1975 1976 <successors-2B1> <successors-2B2> <successors-2B3>
1976 1977
1977 1978 Here rev2 has two possible (i.e. divergent) successors sets. The first
1978 1979 holds one element, whereas the second holds three (i.e. the changeset has
1979 1980 been split).
1980 1981 """
1981 1982 # passed to successorssets caching computation from one call to another
1982 1983 cache = {}
1983 1984 ctx2str = str
1984 1985 node2str = short
1985 1986 if ui.debug():
1986 1987 def ctx2str(ctx):
1987 1988 return ctx.hex()
1988 1989 node2str = hex
1989 1990 for rev in scmutil.revrange(repo, revs):
1990 1991 ctx = repo[rev]
1991 1992 ui.write('%s\n' % ctx2str(ctx))
1992 1993 for succsset in obsolete.successorssets(repo, ctx.node(), cache):
1993 1994 if succsset:
1994 1995 ui.write(' ')
1995 1996 ui.write(node2str(succsset[0]))
1996 1997 for node in succsset[1:]:
1997 1998 ui.write(' ')
1998 1999 ui.write(node2str(node))
1999 2000 ui.write('\n')
2000 2001
2001 2002 @command('debugtemplate',
2002 2003 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2003 2004 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2004 2005 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2005 2006 optionalrepo=True)
2006 2007 def debugtemplate(ui, repo, tmpl, **opts):
2007 2008 """parse and apply a template
2008 2009
2009 2010 If -r/--rev is given, the template is processed as a log template and
2010 2011 applied to the given changesets. Otherwise, it is processed as a generic
2011 2012 template.
2012 2013
2013 2014 Use --verbose to print the parsed tree.
2014 2015 """
2015 2016 revs = None
2016 2017 if opts['rev']:
2017 2018 if repo is None:
2018 2019 raise error.RepoError(_('there is no Mercurial repository here '
2019 2020 '(.hg not found)'))
2020 2021 revs = scmutil.revrange(repo, opts['rev'])
2021 2022
2022 2023 props = {}
2023 2024 for d in opts['define']:
2024 2025 try:
2025 2026 k, v = (e.strip() for e in d.split('=', 1))
2026 2027 if not k or k == 'ui':
2027 2028 raise ValueError
2028 2029 props[k] = v
2029 2030 except ValueError:
2030 2031 raise error.Abort(_('malformed keyword definition: %s') % d)
2031 2032
2032 2033 if ui.verbose:
2033 2034 aliases = ui.configitems('templatealias')
2034 2035 tree = templater.parse(tmpl)
2035 2036 ui.note(templater.prettyformat(tree), '\n')
2036 2037 newtree = templater.expandaliases(tree, aliases)
2037 2038 if newtree != tree:
2038 2039 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2039 2040
2040 2041 mapfile = None
2041 2042 if revs is None:
2042 2043 k = 'debugtemplate'
2043 2044 t = formatter.maketemplater(ui, k, tmpl)
2044 2045 ui.write(templater.stringify(t(k, ui=ui, **props)))
2045 2046 else:
2046 2047 displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl,
2047 2048 mapfile, buffered=False)
2048 2049 for r in revs:
2049 2050 displayer.show(repo[r], **props)
2050 2051 displayer.close()
2051 2052
2052 2053 @command('debugupgraderepo', [
2053 2054 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2054 2055 ('', 'run', False, _('performs an upgrade')),
2055 2056 ])
2056 2057 def debugupgraderepo(ui, repo, run=False, optimize=None):
2057 2058 """upgrade a repository to use different features
2058 2059
2059 2060 If no arguments are specified, the repository is evaluated for upgrade
2060 2061 and a list of problems and potential optimizations is printed.
2061 2062
2062 2063 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2063 2064 can be influenced via additional arguments. More details will be provided
2064 2065 by the command output when run without ``--run``.
2065 2066
2066 2067 During the upgrade, the repository will be locked and no writes will be
2067 2068 allowed.
2068 2069
2069 2070 At the end of the upgrade, the repository may not be readable while new
2070 2071 repository data is swapped in. This window will be as long as it takes to
2071 2072 rename some directories inside the ``.hg`` directory. On most machines, this
2072 2073 should complete almost instantaneously and the chances of a consumer being
2073 2074 unable to access the repository should be low.
2074 2075 """
2075 return repair.upgraderepo(ui, repo, run=run, optimize=optimize)
2076 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2076 2077
2077 2078 @command('debugwalk', commands.walkopts, _('[OPTION]... [FILE]...'),
2078 2079 inferrepo=True)
2079 2080 def debugwalk(ui, repo, *pats, **opts):
2080 2081 """show how files match on given patterns"""
2081 2082 m = scmutil.match(repo[None], pats, opts)
2082 2083 items = list(repo.walk(m))
2083 2084 if not items:
2084 2085 return
2085 2086 f = lambda fn: fn
2086 2087 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2087 2088 f = lambda fn: util.normpath(fn)
2088 2089 fmt = 'f %%-%ds %%-%ds %%s' % (
2089 2090 max([len(abs) for abs in items]),
2090 2091 max([len(m.rel(abs)) for abs in items]))
2091 2092 for abs in items:
2092 2093 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2093 2094 ui.write("%s\n" % line.rstrip())
2094 2095
2095 2096 @command('debugwireargs',
2096 2097 [('', 'three', '', 'three'),
2097 2098 ('', 'four', '', 'four'),
2098 2099 ('', 'five', '', 'five'),
2099 2100 ] + commands.remoteopts,
2100 2101 _('REPO [OPTIONS]... [ONE [TWO]]'),
2101 2102 norepo=True)
2102 2103 def debugwireargs(ui, repopath, *vals, **opts):
2103 2104 repo = hg.peer(ui, opts, repopath)
2104 2105 for opt in commands.remoteopts:
2105 2106 del opts[opt[1]]
2106 2107 args = {}
2107 2108 for k, v in opts.iteritems():
2108 2109 if v:
2109 2110 args[k] = v
2110 2111 # run twice to check that we don't mess up the stream for the next command
2111 2112 res1 = repo.debugwireargs(*vals, **args)
2112 2113 res2 = repo.debugwireargs(*vals, **args)
2113 2114 ui.write("%s\n" % res1)
2114 2115 if res1 != res2:
2115 2116 ui.warn("%s\n" % res2)
@@ -1,1096 +1,354
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 import stat
14 import tempfile
15 13
16 14 from .i18n import _
17 15 from .node import short
18 16 from . import (
19 17 bundle2,
20 18 changegroup,
21 changelog,
22 19 error,
23 20 exchange,
24 manifest,
25 21 obsolete,
26 revlog,
27 scmutil,
28 22 util,
29 vfs as vfsmod,
30 23 )
31 24
32 25 def _bundle(repo, bases, heads, node, suffix, compress=True):
33 26 """create a bundle with the specified revisions as a backup"""
34 27 cgversion = changegroup.safeversion(repo)
35 28
36 29 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
37 30 version=cgversion)
38 31 backupdir = "strip-backup"
39 32 vfs = repo.vfs
40 33 if not vfs.isdir(backupdir):
41 34 vfs.mkdir(backupdir)
42 35
43 36 # Include a hash of all the nodes in the filename for uniqueness
44 37 allcommits = repo.set('%ln::%ln', bases, heads)
45 38 allhashes = sorted(c.hex() for c in allcommits)
46 39 totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
47 40 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
48 41
49 42 comp = None
50 43 if cgversion != '01':
51 44 bundletype = "HG20"
52 45 if compress:
53 46 comp = 'BZ'
54 47 elif compress:
55 48 bundletype = "HG10BZ"
56 49 else:
57 50 bundletype = "HG10UN"
58 51 return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
59 52 compression=comp)
60 53
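The backup filename combines the short form of the topmost stripped node with eight hex digits of a sha1 over every affected changeset hash, which keeps repeated strips of overlapping sets from colliding. A sketch with stand-in hashes:

import hashlib

allhashes = sorted(['a' * 40, 'b' * 40])   # stand-in 40-char changeset ids
totalhash = hashlib.sha1(''.join(allhashes).encode('ascii')).hexdigest()
name = 'strip-backup/%s-%s-%s.hg' % ('a' * 12, totalhash[:8], 'backup')
print(name)   # strip-backup/aaaaaaaaaaaa-<8 hex chars>-backup.hg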
61 54 def _collectfiles(repo, striprev):
62 55 """find out the filelogs affected by the strip"""
63 56 files = set()
64 57
65 58 for x in xrange(striprev, len(repo)):
66 59 files.update(repo[x].files())
67 60
68 61 return sorted(files)
69 62
70 63 def _collectbrokencsets(repo, files, striprev):
71 64 """return the changesets which will be broken by the truncation"""
72 65 s = set()
73 66 def collectone(revlog):
74 67 _, brokenset = revlog.getstrippoint(striprev)
75 68 s.update([revlog.linkrev(r) for r in brokenset])
76 69
77 70 collectone(repo.manifestlog._revlog)
78 71 for fname in files:
79 72 collectone(repo.file(fname))
80 73
81 74 return s
82 75
83 76 def strip(ui, repo, nodelist, backup=True, topic='backup'):
84 77 # This function operates within a transaction of its own, but does
85 78 # not take any lock on the repo.
86 79 # Simple way to maintain backwards compatibility for this
87 80 # argument.
88 81 if backup in ['none', 'strip']:
89 82 backup = False
90 83
91 84 repo = repo.unfiltered()
92 85 repo.destroying()
93 86
94 87 cl = repo.changelog
95 88 # TODO handle undo of merge sets
96 89 if isinstance(nodelist, str):
97 90 nodelist = [nodelist]
98 91 striplist = [cl.rev(node) for node in nodelist]
99 92 striprev = min(striplist)
100 93
101 94 files = _collectfiles(repo, striprev)
102 95 saverevs = _collectbrokencsets(repo, files, striprev)
103 96
104 97 # Some revisions with rev > striprev may not be descendants of striprev.
105 98 # We have to find these revisions and put them in a bundle, so that
106 99 # we can restore them after the truncations.
107 100 # To create the bundle we use repo.changegroupsubset which requires
108 101 # the list of heads and bases of the set of interesting revisions.
109 102 # (head = revision in the set that has no descendant in the set;
110 103 # base = revision in the set that has no ancestor in the set)
111 104 tostrip = set(striplist)
112 105 saveheads = set(saverevs)
113 106 for r in cl.revs(start=striprev + 1):
114 107 if any(p in tostrip for p in cl.parentrevs(r)):
115 108 tostrip.add(r)
116 109
117 110 if r not in tostrip:
118 111 saverevs.add(r)
119 112 saveheads.difference_update(cl.parentrevs(r))
120 113 saveheads.add(r)
121 114 saveheads = [cl.node(r) for r in saveheads]
122 115
123 116 # compute base nodes
124 117 if saverevs:
125 118 descendants = set(cl.descendants(saverevs))
126 119 saverevs.difference_update(descendants)
127 120 savebases = [cl.node(r) for r in saverevs]
128 121 stripbases = [cl.node(r) for r in tostrip]
129 122
130 123 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
131 124 # is much faster
132 125 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
133 126 if newbmtarget:
134 127 newbmtarget = repo[newbmtarget.first()].node()
135 128 else:
136 129 newbmtarget = '.'
137 130
138 131 bm = repo._bookmarks
139 132 updatebm = []
140 133 for m in bm:
141 134 rev = repo[bm[m]].rev()
142 135 if rev in tostrip:
143 136 updatebm.append(m)
144 137
145 138 # create a changegroup for all the branches we need to keep
146 139 backupfile = None
147 140 vfs = repo.vfs
148 141 node = nodelist[-1]
149 142 if backup:
150 143 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
151 144 repo.ui.status(_("saved backup bundle to %s\n") %
152 145 vfs.join(backupfile))
153 146 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
154 147 vfs.join(backupfile))
155 148 tmpbundlefile = None
156 149 if saveheads:
157 150 # do not compress temporary bundle if we remove it from disk later
158 151 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
159 152 compress=False)
160 153
161 154 mfst = repo.manifestlog._revlog
162 155
163 156 curtr = repo.currenttransaction()
164 157 if curtr is not None:
165 158 del curtr # avoid carrying reference to transaction for nothing
166 159 raise error.ProgrammingError('cannot strip from inside a transaction')
167 160
168 161 try:
169 162 with repo.transaction("strip") as tr:
170 163 offset = len(tr.entries)
171 164
172 165 tr.startgroup()
173 166 cl.strip(striprev, tr)
174 167 mfst.strip(striprev, tr)
175 168 if 'treemanifest' in repo.requirements: # safe but unnecessary
176 169 # otherwise
177 170 for unencoded, encoded, size in repo.store.datafiles():
178 171 if (unencoded.startswith('meta/') and
179 172 unencoded.endswith('00manifest.i')):
180 173 dir = unencoded[5:-12]
181 174 repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
182 175 for fn in files:
183 176 repo.file(fn).strip(striprev, tr)
184 177 tr.endgroup()
185 178
186 179 for i in xrange(offset, len(tr.entries)):
187 180 file, troffset, ignore = tr.entries[i]
188 181 with repo.svfs(file, 'a', checkambig=True) as fp:
189 182 fp.truncate(troffset)
190 183 if troffset == 0:
191 184 repo.store.markremoved(file)
192 185
193 186 if tmpbundlefile:
194 187 ui.note(_("adding branch\n"))
195 188 f = vfs.open(tmpbundlefile, "rb")
196 189 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
197 190 if not repo.ui.verbose:
198 191 # silence internal shuffling chatter
199 192 repo.ui.pushbuffer()
200 193 if isinstance(gen, bundle2.unbundle20):
201 194 with repo.transaction('strip') as tr:
202 195 tr.hookargs = {'source': 'strip',
203 196 'url': 'bundle:' + vfs.join(tmpbundlefile)}
204 197 bundle2.applybundle(repo, gen, tr, source='strip',
205 198 url='bundle:' + vfs.join(tmpbundlefile))
206 199 else:
207 200 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
208 201 True)
209 202 if not repo.ui.verbose:
210 203 repo.ui.popbuffer()
211 204 f.close()
212 205 repo._phasecache.invalidate()
213 206
214 207 for m in updatebm:
215 208 bm[m] = repo[newbmtarget].node()
216 209
217 210 with repo.lock():
218 211 with repo.transaction('repair') as tr:
219 212 bm.recordchange(tr)
220 213
221 214 # remove undo files
222 215 for undovfs, undofile in repo.undofiles():
223 216 try:
224 217 undovfs.unlink(undofile)
225 218 except OSError as e:
226 219 if e.errno != errno.ENOENT:
227 220 ui.warn(_('error removing %s: %s\n') %
228 221 (undovfs.join(undofile), str(e)))
229 222
230 223 except: # re-raises
231 224 if backupfile:
232 225 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
233 226 % vfs.join(backupfile))
234 227 if tmpbundlefile:
235 228 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
236 229 % vfs.join(tmpbundlefile))
237 230 ui.warn(_("(fix the problem, then recover the changesets with "
238 231 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
239 232 raise
240 233 else:
241 234 if tmpbundlefile:
242 235 # Remove temporary bundle only if there were no exceptions
243 236 vfs.unlink(tmpbundlefile)
244 237
245 238 repo.destroyed()
246 239 # return the backup file path (or None if 'backup' was False) so
247 240 # extensions can use it
248 241 return backupfile
249 242
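The heads/bases walk above is easiest to see on a toy DAG. A simplified sketch (the real code also seeds saverevs with changesets broken by revlog truncation): strip rev 1 in the chain 0-1-2 with a surviving branch 0-3-4.

parentrevs = {2: (1, -1), 3: (0, -1), 4: (3, -1)}   # rev -> (p1, p2)
striprev = 1
tostrip, saverevs, saveheads = {striprev}, set(), set()
for r in range(striprev + 1, 5):
    if any(p in tostrip for p in parentrevs[r]):
        tostrip.add(r)
    else:
        saverevs.add(r)
        saveheads.difference_update(parentrevs[r])
        saveheads.add(r)
descendants = {4}                     # proper descendants of saverevs here
savebases = saverevs - descendants
print(sorted(tostrip))    # [1, 2]  revisions removed
print(sorted(savebases))  # [3]     bases of the temporary bundle
print(sorted(saveheads))  # [4]     heads of the temporary bundle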
250 243 def rebuildfncache(ui, repo):
251 244 """Rebuilds the fncache file from repo history.
252 245
253 246 Missing entries will be added. Extra entries will be removed.
254 247 """
255 248 repo = repo.unfiltered()
256 249
257 250 if 'fncache' not in repo.requirements:
258 251 ui.warn(_('(not rebuilding fncache because repository does not '
259 252 'support fncache)\n'))
260 253 return
261 254
262 255 with repo.lock():
263 256 fnc = repo.store.fncache
264 257 # Trigger load of fncache.
265 258 if 'irrelevant' in fnc:
266 259 pass
267 260
268 261 oldentries = set(fnc.entries)
269 262 newentries = set()
270 263 seenfiles = set()
271 264
272 265 repolen = len(repo)
273 266 for rev in repo:
274 267 ui.progress(_('rebuilding'), rev, total=repolen,
275 268 unit=_('changesets'))
276 269
277 270 ctx = repo[rev]
278 271 for f in ctx.files():
279 272 # This is to minimize I/O.
280 273 if f in seenfiles:
281 274 continue
282 275 seenfiles.add(f)
283 276
284 277 i = 'data/%s.i' % f
285 278 d = 'data/%s.d' % f
286 279
287 280 if repo.store._exists(i):
288 281 newentries.add(i)
289 282 if repo.store._exists(d):
290 283 newentries.add(d)
291 284
292 285 ui.progress(_('rebuilding'), None)
293 286
294 287 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
295 288 for dir in util.dirs(seenfiles):
296 289 i = 'meta/%s/00manifest.i' % dir
297 290 d = 'meta/%s/00manifest.d' % dir
298 291
299 292 if repo.store._exists(i):
300 293 newentries.add(i)
301 294 if repo.store._exists(d):
302 295 newentries.add(d)
303 296
304 297 addcount = len(newentries - oldentries)
305 298 removecount = len(oldentries - newentries)
306 299 for p in sorted(oldentries - newentries):
307 300 ui.write(_('removing %s\n') % p)
308 301 for p in sorted(newentries - oldentries):
309 302 ui.write(_('adding %s\n') % p)
310 303
311 304 if addcount or removecount:
312 305 ui.write(_('%d items added, %d removed from fncache\n') %
313 306 (addcount, removecount))
314 307 fnc.entries = newentries
315 308 fnc._dirty = True
316 309
317 310 with repo.transaction('fncache') as tr:
318 311 fnc.write(tr)
319 312 else:
320 313 ui.write(_('fncache already up to date\n'))
321 314
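The final reconciliation is a plain set difference between what the fncache lists and what the history walk found in the store:

oldentries = {'data/a.i', 'data/b.i', 'data/stale.i'}   # from fncache
newentries = {'data/a.i', 'data/b.i', 'data/c.i'}       # from history walk
for p in sorted(oldentries - newentries):
    print('removing %s' % p)   # data/stale.i
for p in sorted(newentries - oldentries):
    print('adding %s' % p)     # data/c.i
print('%d items added, %d removed from fncache' %
      (len(newentries - oldentries), len(oldentries - newentries)))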
322 315 def stripbmrevset(repo, mark):
323 316 """
324 317 The revset to strip when strip is called with -B mark
325 318
326 319 Needs to live here so extensions can use it and wrap it even when strip is
327 320 not enabled or not present on a box.
328 321 """
329 322 return repo.revs("ancestors(bookmark(%s)) - "
330 323 "ancestors(head() and not bookmark(%s)) - "
331 324 "ancestors(bookmark() and not bookmark(%s))",
332 325 mark, mark, mark)
333 326
334 327 def deleteobsmarkers(obsstore, indices):
335 328 """Delete some obsmarkers from obsstore and return how many were deleted
336 329
337 330 'indices' is a list of ints which are the indices
338 331 of the markers to be deleted.
339 332
340 333 Every invocation of this function completely rewrites the obsstore file,
341 334 skipping the markers we want to be removed. The new temporary file is
342 335 created, remaining markers are written there and on .close() this file
343 336 gets atomically renamed to obsstore, thus guaranteeing consistency."""
344 337 if not indices:
345 338 # we don't want to rewrite the obsstore with the same content
346 339 return
347 340
348 341 left = []
349 342 current = obsstore._all
350 343 n = 0
351 344 for i, m in enumerate(current):
352 345 if i in indices:
353 346 n += 1
354 347 continue
355 348 left.append(m)
356 349
357 350 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
358 351 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
359 352 newobsstorefile.write(bytes)
360 353 newobsstorefile.close()
361 354 return n
362
363 def upgraderequiredsourcerequirements(repo):
364 """Obtain requirements required to be present to upgrade a repo.
365
366 An upgrade will not be allowed if the repository doesn't have the
367 requirements returned by this function.
368 """
369 return set([
370 # Introduced in Mercurial 0.9.2.
371 'revlogv1',
372 # Introduced in Mercurial 0.9.2.
373 'store',
374 ])
375
376 def upgradeblocksourcerequirements(repo):
377 """Obtain requirements that will prevent an upgrade from occurring.
378
379 An upgrade cannot be performed if the source repository contains a
380 requirement in the returned set.
381 """
382 return set([
383 # The upgrade code does not yet support these experimental features.
384 # This is an artificial limitation.
385 'manifestv2',
386 'treemanifest',
387 # This was a precursor to generaldelta and was never enabled by default.
388 # It should (hopefully) not exist in the wild.
389 'parentdelta',
390 # Upgrade should operate on the actual store, not the shared link.
391 'shared',
392 ])
393
394 def upgradesupportremovedrequirements(repo):
395 """Obtain requirements that can be removed during an upgrade.
396
397 If an upgrade were to create a repository that dropped a requirement,
398 the dropped requirement must appear in the returned set for the upgrade
399 to be allowed.
400 """
401 return set()
402
403 def upgradesupporteddestrequirements(repo):
404 """Obtain requirements that upgrade supports in the destination.
405
406 If the result of the upgrade would create requirements not in this set,
407 the upgrade is disallowed.
408
409 Extensions should monkeypatch this to add their custom requirements.
410 """
411 return set([
412 'dotencode',
413 'fncache',
414 'generaldelta',
415 'revlogv1',
416 'store',
417 ])
418
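As the docstring says, extensions extend this set by monkeypatching. A hedged sketch using extensions.wrapfunction ('myfeature' is a hypothetical requirement; the hosting module is repair here, upgrade after this change):

from mercurial import extensions, repair

def _supporteddest(orig, repo):
    reqs = orig(repo)
    reqs.add('myfeature')   # hypothetical extension requirement
    return reqs

def extsetup(ui):
    extensions.wrapfunction(repair, 'upgradesupporteddestrequirements',
                            _supporteddest)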
419 def upgradeallowednewrequirements(repo):
420 """Obtain requirements that can be added to a repository during upgrade.
421
422 This is used to disallow proposed requirements from being added when
423 they weren't present before.
424
425 We use a list of allowed requirement additions instead of a list of known
426 bad additions because the whitelist approach is safer and will prevent
427 future, unknown requirements from accidentally being added.
428 """
429 return set([
430 'dotencode',
431 'fncache',
432 'generaldelta',
433 ])
434
435 deficiency = 'deficiency'
436 optimisation = 'optimization'
437
438 class upgradeimprovement(object):
439 """Represents an improvement that can be made as part of an upgrade.
440
441 The following attributes are defined on each instance:
442
443 name
444 Machine-readable string uniquely identifying this improvement. It
445 will be mapped to an action later in the upgrade process.
446
447 type
448 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
449 problem. An optimization is an action (sometimes optional) that
450 can be taken to further improve the state of the repository.
451
452 description
453 Message intended for humans explaining the improvement in more detail,
454 including the implications of it. For ``deficiency`` types, should be
455 worded in the present tense. For ``optimisation`` types, should be
456 worded in the future tense.
457
458 upgrademessage
459 Message intended for humans explaining what an upgrade addressing this
460 issue will do. Should be worded in the future tense.
461
462 fromdefault (``deficiency`` types only)
463 Boolean indicating whether the current (deficient) state deviates
464 from Mercurial's default configuration.
465
466 fromconfig (``deficiency`` types only)
467 Boolean indicating whether the current (deficient) state deviates
468 from the current Mercurial configuration.
469 """
470 def __init__(self, name, type, description, upgrademessage, **kwargs):
471 self.name = name
472 self.type = type
473 self.description = description
474 self.upgrademessage = upgrademessage
475
476 for k, v in kwargs.items():
477 setattr(self, k, v)
478
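Because __init__ forwards extra keywords to setattr, deficiency entries can carry the fromdefault/fromconfig flags without the class declaring them. An illustrative construction (values are examples only, assuming the names defined above are in scope):

i = upgradeimprovement(
    name='fncache',
    type=deficiency,
    description='long and reserved filenames may not work correctly',
    upgrademessage='repository will be more resilient',
    fromdefault=True,
    fromconfig=False)
assert i.fromdefault and not i.fromconfig   # kwargs became attributes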
479 def upgradefindimprovements(repo):
480 """Determine improvements that can be made to the repo during upgrade.
481
482 Returns a list of ``upgradeimprovement`` describing repository deficiencies
483 and optimizations.
484 """
485 # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
486 from . import localrepo
487
488 newreporeqs = localrepo.newreporequirements(repo)
489
490 improvements = []
491
492 # We could detect lack of revlogv1 and store here, but they were added
493 # in 0.9.2 and we don't support upgrading repos without these
494 # requirements, so let's not bother.
495
496 if 'fncache' not in repo.requirements:
497 improvements.append(upgradeimprovement(
498 name='fncache',
499 type=deficiency,
500 description=_('long and reserved filenames may not work correctly; '
501 'repository performance is sub-optimal'),
502 upgrademessage=_('repository will be more resilient to storing '
503 'certain paths and performance of certain '
504 'operations should be improved'),
505 fromdefault=True,
506 fromconfig='fncache' in newreporeqs))
507
508 if 'dotencode' not in repo.requirements:
509 improvements.append(upgradeimprovement(
510 name='dotencode',
511 type=deficiency,
512 description=_('storage of filenames beginning with a period or '
513 'space may not work correctly'),
514 upgrademessage=_('repository will be better able to store files '
515 'beginning with a space or period'),
516 fromdefault=True,
517 fromconfig='dotencode' in newreporeqs))
518
519 if 'generaldelta' not in repo.requirements:
520 improvements.append(upgradeimprovement(
521 name='generaldelta',
522 type=deficiency,
523 description=_('deltas within internal storage are unable to '
524 'choose optimal revisions; repository is larger and '
525 'slower than it could be; interaction with other '
526 'repositories may require extra network and CPU '
527 'resources, making "hg push" and "hg pull" slower'),
528 upgrademessage=_('repository storage will be able to create '
529 'optimal deltas; new repository data will be '
530 'smaller and read times should decrease; '
531 'interacting with other repositories using this '
532 'storage model should require less network and '
533 'CPU resources, making "hg push" and "hg pull" '
534 'faster'),
535 fromdefault=True,
536 fromconfig='generaldelta' in newreporeqs))
537
538 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
539 # changelogs with deltas.
540 cl = repo.changelog
541 for rev in cl:
542 chainbase = cl.chainbase(rev)
543 if chainbase != rev:
544 improvements.append(upgradeimprovement(
545 name='removecldeltachain',
546 type=deficiency,
547 description=_('changelog storage is using deltas instead of '
548 'raw entries; changelog reading and any '
549 'operation relying on changelog data are slower '
550 'than they could be'),
551 upgrademessage=_('changelog storage will be reformatted to '
552 'store raw entries; changelog reading will be '
553 'faster; changelog size may be reduced'),
554 fromdefault=True,
555 fromconfig=True))
556 break
557
558 # Now for the optimizations.
559
560 # These are unconditionally added. There is logic later that figures out
561 # which ones to apply.
562
563 improvements.append(upgradeimprovement(
564 name='redeltaparent',
565 type=optimisation,
566 description=_('deltas within internal storage will be recalculated to '
567 'choose an optimal base revision where this was not '
568 'already done; the size of the repository may shrink and '
569 'various operations may become faster; the first time '
570 'this optimization is performed could slow down upgrade '
571 'execution considerably; subsequent invocations should '
572 'not run noticeably slower'),
573 upgrademessage=_('deltas within internal storage will choose a new '
574 'base revision if needed')))
575
576 improvements.append(upgradeimprovement(
577 name='redeltamultibase',
578 type=optimisation,
579 description=_('deltas within internal storage will be recalculated '
580 'against multiple base revisions and the smallest '
581 'difference will be used; the size of the repository may '
582 'shrink significantly when there are many merges; this '
583 'optimization will slow down execution in proportion to '
584 'the number of merges in the repository and the number '
585 'of files in the repository; this slowdown should not '
586 'be significant unless there are tens of thousands of '
587 'files and thousands of merges'),
588 upgrademessage=_('deltas within internal storage will choose an '
589 'optimal delta by computing deltas against multiple '
590 'parents; may slow down execution time '
591 'significantly')))
592
593 improvements.append(upgradeimprovement(
594 name='redeltaall',
595 type=optimisation,
596 description=_('deltas within internal storage will always be '
597 'recalculated without reusing prior deltas; this will '
598 'likely make execution run several times slower; this '
599 'optimization is typically not needed'),
600 upgrademessage=_('deltas within internal storage will be fully '
601 'recomputed; this will likely drastically slow down '
602 'execution time')))
603
604 return improvements
605
606 def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
607 optimize):
608 """Determine upgrade actions that will be performed.
609
610 Given a list of improvements as returned by ``upgradefindimprovements``,
611 determine the list of upgrade actions that will be performed.
612
613 The role of this function is to filter improvements if needed, apply
614 recommended optimizations from the improvements list that make sense,
615 etc.
616
617 Returns a list of action names.
618 """
619 newactions = []
620
621 knownreqs = upgradesupporteddestrequirements(repo)
622
623 for i in improvements:
624 name = i.name
625
626 # If the action is a requirement that doesn't show up in the
627 # destination requirements, prune the action.
628 if name in knownreqs and name not in destreqs:
629 continue
630
631 if i.type == deficiency:
632 newactions.append(name)
633
634 newactions.extend(o for o in sorted(optimize) if o not in newactions)
635
636 # FUTURE consider adding some optimizations here for certain transitions.
637 # e.g. adding generaldelta could schedule parent redeltas.
638
639 return newactions
640
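A worked pass through the filter above with toy improvement objects: a deficiency whose requirement is absent from the destination gets pruned, while requested optimizations are appended last.

class improvement(object):   # stand-in for upgradeimprovement
    def __init__(self, name, type):
        self.name, self.type = name, type

improvements = [improvement('generaldelta', 'deficiency'),
                improvement('redeltaall', 'optimization')]
knownreqs = {'generaldelta', 'fncache'}   # supported dest requirements
destreqs = {'fncache'}                    # generaldelta not wanted here
optimize = {'redeltaall'}

newactions = []
for i in improvements:
    if i.name in knownreqs and i.name not in destreqs:
        continue                          # pruned
    if i.type == 'deficiency':
        newactions.append(i.name)
newactions.extend(o for o in sorted(optimize) if o not in newactions)
print(newactions)                         # ['redeltaall']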
641 def _revlogfrompath(repo, path):
642 """Obtain a revlog from a repo path.
643
644 An instance of the appropriate class is returned.
645 """
646 if path == '00changelog.i':
647 return changelog.changelog(repo.svfs)
648 elif path.endswith('00manifest.i'):
649 mandir = path[:-len('00manifest.i')]
650 return manifest.manifestrevlog(repo.svfs, dir=mandir)
651 else:
652 # Filelogs don't do anything special with settings. So we can use a
653 # vanilla revlog.
654 return revlog.revlog(repo.svfs, path)
655
656 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
657 """Copy revlogs between 2 repos."""
658 revcount = 0
659 srcsize = 0
660 srcrawsize = 0
661 dstsize = 0
662 fcount = 0
663 frevcount = 0
664 fsrcsize = 0
665 frawsize = 0
666 fdstsize = 0
667 mcount = 0
668 mrevcount = 0
669 msrcsize = 0
670 mrawsize = 0
671 mdstsize = 0
672 crevcount = 0
673 csrcsize = 0
674 crawsize = 0
675 cdstsize = 0
676
677 # Perform a pass to collect metadata. This validates we can open all
678 # source files and allows a unified progress bar to be displayed.
679 for unencoded, encoded, size in srcrepo.store.walk():
680 if unencoded.endswith('.d'):
681 continue
682
683 rl = _revlogfrompath(srcrepo, unencoded)
684 revcount += len(rl)
685
686 datasize = 0
687 rawsize = 0
688 idx = rl.index
689 for rev in rl:
690 e = idx[rev]
691 datasize += e[1]
692 rawsize += e[2]
693
694 srcsize += datasize
695 srcrawsize += rawsize
696
697 # This is for the separate progress bars.
698 if isinstance(rl, changelog.changelog):
699 crevcount += len(rl)
700 csrcsize += datasize
701 crawsize += rawsize
702 elif isinstance(rl, manifest.manifestrevlog):
703 mcount += 1
704 mrevcount += len(rl)
705 msrcsize += datasize
706 mrawsize += rawsize
707 elif isinstance(rl, revlog.revlog):
708 fcount += 1
709 frevcount += len(rl)
710 fsrcsize += datasize
711 frawsize += rawsize
712
713 if not revcount:
714 return
715
716 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
717 '%d in changelog)\n') %
718 (revcount, frevcount, mrevcount, crevcount))
719 ui.write(_('migrating %s in store; %s tracked data\n') % (
720 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
721
722 # Used to keep track of progress.
723 progress = []
724 def oncopiedrevision(rl, rev, node):
725 progress[1] += 1
726 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
727
728 # Do the actual copying.
729 # FUTURE this operation can be farmed off to worker processes.
730 seen = set()
731 for unencoded, encoded, size in srcrepo.store.walk():
732 if unencoded.endswith('.d'):
733 continue
734
735 oldrl = _revlogfrompath(srcrepo, unencoded)
736 newrl = _revlogfrompath(dstrepo, unencoded)
737
738 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
739 ui.write(_('finished migrating %d manifest revisions across %d '
740 'manifests; change in size: %s\n') %
741 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
742
743 ui.write(_('migrating changelog containing %d revisions '
744 '(%s in store; %s tracked data)\n') %
745 (crevcount, util.bytecount(csrcsize),
746 util.bytecount(crawsize)))
747 seen.add('c')
748 progress[:] = [_('changelog revisions'), 0, crevcount]
749 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
750 ui.write(_('finished migrating %d filelog revisions across %d '
751 'filelogs; change in size: %s\n') %
752 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
753
754 ui.write(_('migrating %d manifests containing %d revisions '
755 '(%s in store; %s tracked data)\n') %
756 (mcount, mrevcount, util.bytecount(msrcsize),
757 util.bytecount(mrawsize)))
758 seen.add('m')
759 progress[:] = [_('manifest revisions'), 0, mrevcount]
760 elif 'f' not in seen:
761 ui.write(_('migrating %d filelogs containing %d revisions '
762 '(%s in store; %s tracked data)\n') %
763 (fcount, frevcount, util.bytecount(fsrcsize),
764 util.bytecount(frawsize)))
765 seen.add('f')
766 progress[:] = [_('file revisions'), 0, frevcount]
767
768 ui.progress(progress[0], progress[1], total=progress[2])
769
770 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
771 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
772 deltareuse=deltareuse,
773 aggressivemergedeltas=aggressivemergedeltas)
774
775 datasize = 0
776 idx = newrl.index
777 for rev in newrl:
778 datasize += idx[rev][1]
779
780 dstsize += datasize
781
782 if isinstance(newrl, changelog.changelog):
783 cdstsize += datasize
784 elif isinstance(newrl, manifest.manifestrevlog):
785 mdstsize += datasize
786 else:
787 fdstsize += datasize
788
789 ui.progress(progress[0], None)
790
791 ui.write(_('finished migrating %d changelog revisions; change in size: '
792 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
793
794 ui.write(_('finished migrating %d total revisions; total change in store '
795 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
796
797 def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
798 """Determine whether to copy a store file during upgrade.
799
800 This function is called when migrating store files from ``srcrepo`` to
801 ``dstrepo`` as part of upgrading a repository.
802
803 Args:
804 srcrepo: repo we are copying from
805 dstrepo: repo we are copying to
806 requirements: set of requirements for ``dstrepo``
807 path: store file being examined
808 mode: the ``ST_MODE`` file type of ``path``
809 st: ``stat`` data structure for ``path``
810
811 Function should return ``True`` if the file is to be copied.
812 """
813 # Skip revlogs.
814 if path.endswith(('.i', '.d')):
815 return False
816 # Skip transaction related files.
817 if path.startswith('undo'):
818 return False
819 # Only copy regular files.
820 if mode != stat.S_IFREG:
821 return False
822 # Skip files that are handled separately.
823 if path in ('lock', 'fncache'):
824 return False
825
826 return True
827
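A quick check of the filter's verdicts on representative store paths (revlogs and transaction files are recreated rather than copied; 'lock' and 'fncache' are handled separately):

import stat

mode = stat.S_IFREG   # assume regular files for this sketch
for path in ('00changelog.i', 'data/x.d', 'undo.bookmarks',
             'phaseroots', 'lock'):
    keep = (not path.endswith(('.i', '.d'))
            and not path.startswith('undo')
            and mode == stat.S_IFREG
            and path not in ('lock', 'fncache'))
    print('%-15s %s' % (path, keep))   # only phaseroots is copied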
828 def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
829 """Hook point for extensions to perform additional actions during upgrade.
830
831 This function is called after revlogs and store files have been copied but
832 before the new store is swapped into the original location.
833 """
834
835 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
836 """Do the low-level work of upgrading a repository.
837
838 The upgrade is effectively performed as a copy between a source
839 repository and a temporary destination repository.
840
841 The source repository is unmodified for as long as possible so the
842 upgrade can abort at any time without causing loss of service for
843 readers and without corrupting the source repository.
844 """
845 assert srcrepo.currentwlock()
846 assert dstrepo.currentwlock()
847
848 ui.write(_('(it is safe to interrupt this process any time before '
849 'data migration completes)\n'))
850
851 if 'redeltaall' in actions:
852 deltareuse = revlog.revlog.DELTAREUSENEVER
853 elif 'redeltaparent' in actions:
854 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
855 elif 'redeltamultibase' in actions:
856 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
857 else:
858 deltareuse = revlog.revlog.DELTAREUSEALWAYS
859
860 with dstrepo.transaction('upgrade') as tr:
861 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
862 'redeltamultibase' in actions)
863
864 # Now copy other files in the store directory.
865 for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
866 if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
867 p, kind, st):
868 continue
869
870 srcrepo.ui.write(_('copying %s\n') % p)
871 src = srcrepo.store.vfs.join(p)
872 dst = dstrepo.store.vfs.join(p)
873 util.copyfile(src, dst, copystat=True)
874
875 _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)
876
877 ui.write(_('data fully migrated to temporary repository\n'))
878
879 backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
880 backupvfs = vfsmod.vfs(backuppath)
881
882 # Make a backup of requires file first, as it is the first to be modified.
883 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
884
885 # We install an arbitrary requirement that clients must not support
886 # as a mechanism to lock out new clients during the data swap. This is
887 # better than allowing a client to continue while the repository is in
888 # an inconsistent state.
889 ui.write(_('marking source repository as being upgraded; clients will be '
890 'unable to read from repository\n'))
891 scmutil.writerequires(srcrepo.vfs,
892 srcrepo.requirements | set(['upgradeinprogress']))
893
894 ui.write(_('starting in-place swap of repository data\n'))
895 ui.write(_('replaced files will be backed up at %s\n') %
896 backuppath)
897
898 # Now swap in the new store directory. Doing it as a rename should make
899 # the operation nearly instantaneous and atomic (at least in well-behaved
900 # environments).
901 ui.write(_('replacing store...\n'))
902 tstart = util.timer()
903 util.rename(srcrepo.spath, backupvfs.join('store'))
904 util.rename(dstrepo.spath, srcrepo.spath)
905 elapsed = util.timer() - tstart
906 ui.write(_('store replacement complete; repository was inconsistent for '
907 '%0.1fs\n') % elapsed)
908
909 # We first write the requirements file. Any new requirements will lock
910 # out legacy clients.
911 ui.write(_('finalizing requirements file and making repository readable '
912 'again\n'))
913 scmutil.writerequires(srcrepo.vfs, requirements)
914
915 # The lock file from the old store won't be removed because nothing has a
916 # reference to its new location. So clean it up manually. Alternatively, we
917 # could update srcrepo.svfs and other variables to point to the new
918 # location. This is simpler.
919 backupvfs.unlink('store/lock')
920
921 return backuppath
922
923 def upgraderepo(ui, repo, run=False, optimize=None):
924 """Upgrade a repository in place."""
925 # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
926 from . import localrepo
927
928 optimize = set(optimize or [])
929 repo = repo.unfiltered()
930
931 # Ensure the repository can be upgraded.
932 missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
933 if missingreqs:
934 raise error.Abort(_('cannot upgrade repository; requirement '
935 'missing: %s') % _(', ').join(sorted(missingreqs)))
936
937 blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
938 if blockedreqs:
939 raise error.Abort(_('cannot upgrade repository; unsupported source '
940 'requirement: %s') %
941 _(', ').join(sorted(blockedreqs)))
942
943 # FUTURE there is potentially a need to control the wanted requirements via
944 # command arguments or via an extension hook point.
945 newreqs = localrepo.newreporequirements(repo)
946
947 noremovereqs = (repo.requirements - newreqs -
948 upgradesupportremovedrequirements(repo))
949 if noremovereqs:
950 raise error.Abort(_('cannot upgrade repository; requirement would be '
951 'removed: %s') % _(', ').join(sorted(noremovereqs)))
952
953 noaddreqs = (newreqs - repo.requirements -
954 upgradeallowednewrequirements(repo))
955 if noaddreqs:
956 raise error.Abort(_('cannot upgrade repository; do not support adding '
957 'requirement: %s') %
958 _(', ').join(sorted(noaddreqs)))
959
960 unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
961 if unsupportedreqs:
962 raise error.Abort(_('cannot upgrade repository; do not support '
963 'destination requirement: %s') %
964 _(', ').join(sorted(unsupportedreqs)))
965
966 # Find and validate all improvements that can be made.
967 improvements = upgradefindimprovements(repo)
968 for i in improvements:
969 if i.type not in (deficiency, optimisation):
970 raise error.Abort(_('unexpected improvement type %s for %s') % (
971 i.type, i.name))
972
973 # Validate arguments.
974 unknownoptimize = optimize - set(i.name for i in improvements
975 if i.type == optimisation)
976 if unknownoptimize:
977 raise error.Abort(_('unknown optimization action requested: %s') %
978 ', '.join(sorted(unknownoptimize)),
979 hint=_('run without arguments to see valid '
980 'optimizations'))
981
982 actions = upgradedetermineactions(repo, improvements, repo.requirements,
983 newreqs, optimize)
984
985 def printrequirements():
986 ui.write(_('requirements\n'))
987 ui.write(_(' preserved: %s\n') %
988 _(', ').join(sorted(newreqs & repo.requirements)))
989
990 if repo.requirements - newreqs:
991 ui.write(_(' removed: %s\n') %
992 _(', ').join(sorted(repo.requirements - newreqs)))
993
994 if newreqs - repo.requirements:
995 ui.write(_(' added: %s\n') %
996 _(', ').join(sorted(newreqs - repo.requirements)))
997
998 ui.write('\n')
999
1000 def printupgradeactions():
1001 for action in actions:
1002 for i in improvements:
1003 if i.name == action:
1004 ui.write('%s\n %s\n\n' %
1005 (i.name, i.upgrademessage))
1006
1007 if not run:
1008 fromdefault = []
1009 fromconfig = []
1010 optimizations = []
1011
1012 for i in improvements:
1013 assert i.type in (deficiency, optimisation)
1014 if i.type == deficiency:
1015 if i.fromdefault:
1016 fromdefault.append(i)
1017 if i.fromconfig:
1018 fromconfig.append(i)
1019 else:
1020 optimizations.append(i)
1021
1022 if fromdefault or fromconfig:
1023 fromconfignames = set(x.name for x in fromconfig)
1024 onlydefault = [i for i in fromdefault
1025 if i.name not in fromconfignames]
1026
1027 if fromconfig:
1028 ui.write(_('repository lacks features recommended by '
1029 'current config options:\n\n'))
1030 for i in fromconfig:
1031 ui.write('%s\n %s\n\n' % (i.name, i.description))
1032
1033 if onlydefault:
1034 ui.write(_('repository lacks features used by the default '
1035 'config options:\n\n'))
1036 for i in onlydefault:
1037 ui.write('%s\n %s\n\n' % (i.name, i.description))
1038
1039 ui.write('\n')
1040 else:
1041 ui.write(_('(no feature deficiencies found in existing '
1042 'repository)\n'))
1043
1044 ui.write(_('performing an upgrade with "--run" will make the following '
1045 'changes:\n\n'))
1046
1047 printrequirements()
1048 printupgradeactions()
1049
1050 unusedoptimize = [i for i in improvements
1051 if i.name not in actions and i.type == optimisation]
1052 if unusedoptimize:
1053 ui.write(_('additional optimizations are available by specifying '
1054 '"--optimize <name>":\n\n'))
1055 for i in unusedoptimize:
1056 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
1057 return
1058
1059 # Else we're in the run=true case.
1060 ui.write(_('upgrade will perform the following actions:\n\n'))
1061 printrequirements()
1062 printupgradeactions()
1063
1064 ui.write(_('beginning upgrade...\n'))
1065 with repo.wlock():
1066 with repo.lock():
1067 ui.write(_('repository locked and read-only\n'))
1068 # Our strategy for upgrading the repository is to create a new,
1069 # temporary repository, write data to it, then do a swap of the
1070 # data. There are less heavyweight ways to do this, but it is easier
1071 # to create a new repo object than to instantiate all the components
1072 # (like the store) separately.
1073 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
1074 backuppath = None
1075 try:
1076 ui.write(_('creating temporary repository to stage migrated '
1077 'data: %s\n') % tmppath)
1078 dstrepo = localrepo.localrepository(repo.baseui,
1079 path=tmppath,
1080 create=True)
1081
1082 with dstrepo.wlock():
1083 with dstrepo.lock():
1084 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
1085 actions)
1086
1087 finally:
1088 ui.write(_('removing temporary repository %s\n') % tmppath)
1089 repo.vfs.rmtree(tmppath, forcibly=True)
1090
1091 if backuppath:
1092 ui.warn(_('copy of old repository backed up at %s\n') %
1093 backuppath)
1094 ui.warn(_('the old repository will not be deleted; remove '
1095 'it to free up disk space once the upgraded '
1096 'repository is verified\n'))
@@ -1,1096 +1,758
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 import errno
12 import hashlib
13 11 import stat
14 12 import tempfile
15 13
16 14 from .i18n import _
17 from .node import short
18 15 from . import (
19 bundle2,
20 changegroup,
21 16 changelog,
22 17 error,
23 exchange,
24 18 manifest,
25 obsolete,
26 19 revlog,
27 20 scmutil,
28 21 util,
29 22 vfs as vfsmod,
30 23 )
31 24
32 def _bundle(repo, bases, heads, node, suffix, compress=True):
33 """create a bundle with the specified revisions as a backup"""
34 cgversion = changegroup.safeversion(repo)
35
36 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
37 version=cgversion)
38 backupdir = "strip-backup"
39 vfs = repo.vfs
40 if not vfs.isdir(backupdir):
41 vfs.mkdir(backupdir)
42
43 # Include a hash of all the nodes in the filename for uniqueness
44 allcommits = repo.set('%ln::%ln', bases, heads)
45 allhashes = sorted(c.hex() for c in allcommits)
46 totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
47 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
48
49 comp = None
50 if cgversion != '01':
51 bundletype = "HG20"
52 if compress:
53 comp = 'BZ'
54 elif compress:
55 bundletype = "HG10BZ"
56 else:
57 bundletype = "HG10UN"
58 return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
59 compression=comp)
60
61 def _collectfiles(repo, striprev):
62 """find out the filelogs affected by the strip"""
63 files = set()
64
65 for x in xrange(striprev, len(repo)):
66 files.update(repo[x].files())
67
68 return sorted(files)
69
70 def _collectbrokencsets(repo, files, striprev):
71 """return the changesets which will be broken by the truncation"""
72 s = set()
73 def collectone(revlog):
74 _, brokenset = revlog.getstrippoint(striprev)
75 s.update([revlog.linkrev(r) for r in brokenset])
76
77 collectone(repo.manifestlog._revlog)
78 for fname in files:
79 collectone(repo.file(fname))
80
81 return s
82
83 def strip(ui, repo, nodelist, backup=True, topic='backup'):
84 # This function operates within a transaction of its own, but does
85 # not take any lock on the repo.
86 # Simple way to maintain backwards compatibility for this
87 # argument.
88 if backup in ['none', 'strip']:
89 backup = False
90
91 repo = repo.unfiltered()
92 repo.destroying()
93
94 cl = repo.changelog
95 # TODO handle undo of merge sets
96 if isinstance(nodelist, str):
97 nodelist = [nodelist]
98 striplist = [cl.rev(node) for node in nodelist]
99 striprev = min(striplist)
100
101 files = _collectfiles(repo, striprev)
102 saverevs = _collectbrokencsets(repo, files, striprev)
103
104 # Some revisions with rev > striprev may not be descendants of striprev.
105 # We have to find these revisions and put them in a bundle, so that
106 # we can restore them after the truncations.
107 # To create the bundle we use repo.changegroupsubset which requires
108 # the list of heads and bases of the set of interesting revisions.
109 # (head = revision in the set that has no descendant in the set;
110 # base = revision in the set that has no ancestor in the set)
111 tostrip = set(striplist)
112 saveheads = set(saverevs)
113 for r in cl.revs(start=striprev + 1):
114 if any(p in tostrip for p in cl.parentrevs(r)):
115 tostrip.add(r)
116
117 if r not in tostrip:
118 saverevs.add(r)
119 saveheads.difference_update(cl.parentrevs(r))
120 saveheads.add(r)
121 saveheads = [cl.node(r) for r in saveheads]
122
123 # compute base nodes
124 if saverevs:
125 descendants = set(cl.descendants(saverevs))
126 saverevs.difference_update(descendants)
127 savebases = [cl.node(r) for r in saverevs]
128 stripbases = [cl.node(r) for r in tostrip]
129
130 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
131 # is much faster
132 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
133 if newbmtarget:
134 newbmtarget = repo[newbmtarget.first()].node()
135 else:
136 newbmtarget = '.'
137
138 bm = repo._bookmarks
139 updatebm = []
140 for m in bm:
141 rev = repo[bm[m]].rev()
142 if rev in tostrip:
143 updatebm.append(m)
144
145 # create a changegroup for all the branches we need to keep
146 backupfile = None
147 vfs = repo.vfs
148 node = nodelist[-1]
149 if backup:
150 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
151 repo.ui.status(_("saved backup bundle to %s\n") %
152 vfs.join(backupfile))
153 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
154 vfs.join(backupfile))
155 tmpbundlefile = None
156 if saveheads:
157 # do not compress temporary bundle if we remove it from disk later
158 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
159 compress=False)
160
161 mfst = repo.manifestlog._revlog
162
163 curtr = repo.currenttransaction()
164 if curtr is not None:
165 del curtr # avoid carrying reference to transaction for nothing
166 raise error.ProgrammingError('cannot strip from inside a transaction')
167
168 try:
169 with repo.transaction("strip") as tr:
170 offset = len(tr.entries)
171
172 tr.startgroup()
173 cl.strip(striprev, tr)
174 mfst.strip(striprev, tr)
175 if 'treemanifest' in repo.requirements: # safe but unnecessary
176 # otherwise
177 for unencoded, encoded, size in repo.store.datafiles():
178 if (unencoded.startswith('meta/') and
179 unencoded.endswith('00manifest.i')):
180 dir = unencoded[5:-12]
181 repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
182 for fn in files:
183 repo.file(fn).strip(striprev, tr)
184 tr.endgroup()
185
186 for i in xrange(offset, len(tr.entries)):
187 file, troffset, ignore = tr.entries[i]
188 with repo.svfs(file, 'a', checkambig=True) as fp:
189 fp.truncate(troffset)
190 if troffset == 0:
191 repo.store.markremoved(file)
192
193 if tmpbundlefile:
194 ui.note(_("adding branch\n"))
195 f = vfs.open(tmpbundlefile, "rb")
196 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
197 if not repo.ui.verbose:
198 # silence internal shuffling chatter
199 repo.ui.pushbuffer()
200 if isinstance(gen, bundle2.unbundle20):
201 with repo.transaction('strip') as tr:
202 tr.hookargs = {'source': 'strip',
203 'url': 'bundle:' + vfs.join(tmpbundlefile)}
204 bundle2.applybundle(repo, gen, tr, source='strip',
205 url='bundle:' + vfs.join(tmpbundlefile))
206 else:
207 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
208 True)
209 if not repo.ui.verbose:
210 repo.ui.popbuffer()
211 f.close()
212 repo._phasecache.invalidate()
213
214 for m in updatebm:
215 bm[m] = repo[newbmtarget].node()
216
217 with repo.lock():
218 with repo.transaction('repair') as tr:
219 bm.recordchange(tr)
220
221 # remove undo files
222 for undovfs, undofile in repo.undofiles():
223 try:
224 undovfs.unlink(undofile)
225 except OSError as e:
226 if e.errno != errno.ENOENT:
227 ui.warn(_('error removing %s: %s\n') %
228 (undovfs.join(undofile), str(e)))
229
230 except: # re-raises
231 if backupfile:
232 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
233 % vfs.join(backupfile))
234 if tmpbundlefile:
235 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
236 % vfs.join(tmpbundlefile))
237 ui.warn(_("(fix the problem, then recover the changesets with "
238 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
239 raise
240 else:
241 if tmpbundlefile:
242 # Remove temporary bundle only if there were no exceptions
243 vfs.unlink(tmpbundlefile)
244
245 repo.destroyed()
246 # return the backup file path (or None if 'backup' was False) so
247 # extensions can use it
248 return backupfile
249
250 def rebuildfncache(ui, repo):
251 """Rebuilds the fncache file from repo history.
252
253 Missing entries will be added. Extra entries will be removed.
254 """
255 repo = repo.unfiltered()
256
257 if 'fncache' not in repo.requirements:
258 ui.warn(_('(not rebuilding fncache because repository does not '
259 'support fncache)\n'))
260 return
261
262 with repo.lock():
263 fnc = repo.store.fncache
264 # Trigger load of fncache.
265 if 'irrelevant' in fnc:
266 pass
267
268 oldentries = set(fnc.entries)
269 newentries = set()
270 seenfiles = set()
271
272 repolen = len(repo)
273 for rev in repo:
274 ui.progress(_('rebuilding'), rev, total=repolen,
275 unit=_('changesets'))
276
277 ctx = repo[rev]
278 for f in ctx.files():
279 # This is to minimize I/O.
280 if f in seenfiles:
281 continue
282 seenfiles.add(f)
283
284 i = 'data/%s.i' % f
285 d = 'data/%s.d' % f
286
287 if repo.store._exists(i):
288 newentries.add(i)
289 if repo.store._exists(d):
290 newentries.add(d)
291
292 ui.progress(_('rebuilding'), None)
293
294 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
295 for dir in util.dirs(seenfiles):
296 i = 'meta/%s/00manifest.i' % dir
297 d = 'meta/%s/00manifest.d' % dir
298
299 if repo.store._exists(i):
300 newentries.add(i)
301 if repo.store._exists(d):
302 newentries.add(d)
303
304 addcount = len(newentries - oldentries)
305 removecount = len(oldentries - newentries)
306 for p in sorted(oldentries - newentries):
307 ui.write(_('removing %s\n') % p)
308 for p in sorted(newentries - oldentries):
309 ui.write(_('adding %s\n') % p)
310
311 if addcount or removecount:
312 ui.write(_('%d items added, %d removed from fncache\n') %
313 (addcount, removecount))
314 fnc.entries = newentries
315 fnc._dirty = True
316
317 with repo.transaction('fncache') as tr:
318 fnc.write(tr)
319 else:
320 ui.write(_('fncache already up to date\n'))
321
322 def stripbmrevset(repo, mark):
323 """
324 The revset to strip when strip is called with -B mark
325
326 Needs to live here so extensions can use it and wrap it even when strip is
327 not enabled or not present on a box.
328 """
329 return repo.revs("ancestors(bookmark(%s)) - "
330 "ancestors(head() and not bookmark(%s)) - "
331 "ancestors(bookmark() and not bookmark(%s))",
332 mark, mark, mark)
333
334 def deleteobsmarkers(obsstore, indices):
335 """Delete some obsmarkers from obsstore and return how many were deleted
336
337 'indices' is a list of ints which are the indices
338 of the markers to be deleted.
339
340 Every invocation of this function completely rewrites the obsstore file,
341 skipping the markers we want to be removed. The new temporary file is
342 created, remaining markers are written there and on .close() this file
343 gets atomically renamed to obsstore, thus guaranteeing consistency."""
344 if not indices:
345 # we don't want to rewrite the obsstore with the same content
346 return
347
348 left = []
349 current = obsstore._all
350 n = 0
351 for i, m in enumerate(current):
352 if i in indices:
353 n += 1
354 continue
355 left.append(m)
356
357 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
358 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
359 newobsstorefile.write(bytes)
360 newobsstorefile.close()
361 return n
362
363 25 def upgraderequiredsourcerequirements(repo):
364 26 """Obtain requirements required to be present to upgrade a repo.
365 27
366 28 An upgrade will not be allowed if the repository doesn't have the
367 29 requirements returned by this function.
368 30 """
369 31 return set([
370 32 # Introduced in Mercurial 0.9.2.
371 33 'revlogv1',
372 34 # Introduced in Mercurial 0.9.2.
373 35 'store',
374 36 ])
375 37
376 38 def upgradeblocksourcerequirements(repo):
377 39 """Obtain requirements that will prevent an upgrade from occurring.
378 40
379 41 An upgrade cannot be performed if the source repository contains a
380 42 requirements in the returned set.
381 43 """
382 44 return set([
383 45 # The upgrade code does not yet support these experimental features.
384 46 # This is an artificial limitation.
385 47 'manifestv2',
386 48 'treemanifest',
387 49 # This was a precursor to generaldelta and was never enabled by default.
388 50 # It should (hopefully) not exist in the wild.
389 51 'parentdelta',
390 52 # Upgrade should operate on the actual store, not the shared link.
391 53 'shared',
392 54 ])
393 55
394 56 def upgradesupportremovedrequirements(repo):
395 57 """Obtain requirements that can be removed during an upgrade.
396 58
397 59 If an upgrade were to create a repository that dropped a requirement,
398 60 the dropped requirement must appear in the returned set for the upgrade
399 61 to be allowed.
400 62 """
401 63 return set()
402 64
403 65 def upgradesupporteddestrequirements(repo):
404 66 """Obtain requirements that upgrade supports in the destination.
405 67
406 68 If the result of the upgrade would create requirements not in this set,
407 69 the upgrade is disallowed.
408 70
409 71 Extensions should monkeypatch this to add their custom requirements.
410 72 """
411 73 return set([
412 74 'dotencode',
413 75 'fncache',
414 76 'generaldelta',
415 77 'revlogv1',
416 78 'store',
417 79 ])
418 80
419 81 def upgradeallowednewrequirements(repo):
420 82 """Obtain requirements that can be added to a repository during upgrade.
421 83
422 84 This is used to disallow proposed requirements from being added when
423 85 they weren't present before.
424 86
425 87 We use a list of allowed requirement additions instead of a list of known
426 88 bad additions because the whitelist approach is safer and will prevent
427 89 future, unknown requirements from accidentally being added.
428 90 """
429 91 return set([
430 92 'dotencode',
431 93 'fncache',
432 94 'generaldelta',
433 95 ])
434 96
435 97 deficiency = 'deficiency'
436 98 optimisation = 'optimization'
437 99
438 100 class upgradeimprovement(object):
439 101 """Represents an improvement that can be made as part of an upgrade.
440 102
441 103 The following attributes are defined on each instance:
442 104
443 105 name
444 106 Machine-readable string uniquely identifying this improvement. It
445 107 will be mapped to an action later in the upgrade process.
446 108
447 109 type
448 110 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
449 111 problem. An optimization is an action (sometimes optional) that
450 112 can be taken to further improve the state of the repository.
451 113
452 114 description
453 115 Message intended for humans explaining the improvement in more detail,
454 116 including the implications of it. For ``deficiency`` types, should be
455 117 worded in the present tense. For ``optimisation`` types, should be
456 118 worded in the future tense.
457 119
458 120 upgrademessage
459 121 Message intended for humans explaining what an upgrade addressing this
460 122 issue will do. Should be worded in the future tense.
461 123
462 124 fromdefault (``deficiency`` types only)
463 125 Boolean indicating whether the current (deficient) state deviates
464 126 from Mercurial's default configuration.
465 127
466 128 fromconfig (``deficiency`` types only)
467 129 Boolean indicating whether the current (deficient) state deviates
468 130 from the current Mercurial configuration.
469 131 """
470 132 def __init__(self, name, type, description, upgrademessage, **kwargs):
471 133 self.name = name
472 134 self.type = type
473 135 self.description = description
474 136 self.upgrademessage = upgrademessage
475 137
476 138 for k, v in kwargs.items():
477 139 setattr(self, k, v)
478 140
479 141 def upgradefindimprovements(repo):
480 142 """Determine improvements that can be made to the repo during upgrade.
481 143
482 144 Returns a list of ``upgradeimprovement`` describing repository deficiencies
483 145 and optimizations.
484 146 """
485 147 # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
486 148 from . import localrepo
487 149
488 150 newreporeqs = localrepo.newreporequirements(repo)
489 151
490 152 improvements = []
491 153
492 154 # We could detect lack of revlogv1 and store here, but they were added
493 155 # in 0.9.2 and we don't support upgrading repos without these
494 156 # requirements, so let's not bother.
495 157
496 158 if 'fncache' not in repo.requirements:
497 159 improvements.append(upgradeimprovement(
498 160 name='fncache',
499 161 type=deficiency,
500 162 description=_('long and reserved filenames may not work correctly; '
501 163 'repository performance is sub-optimal'),
502 164 upgrademessage=_('repository will be more resilient to storing '
503 165 'certain paths and performance of certain '
504 166 'operations should be improved'),
505 167 fromdefault=True,
506 168 fromconfig='fncache' in newreporeqs))
507 169
508 170 if 'dotencode' not in repo.requirements:
509 171 improvements.append(upgradeimprovement(
510 172 name='dotencode',
511 173 type=deficiency,
512 174 description=_('storage of filenames beginning with a period or '
513 175 'space may not work correctly'),
514 176 upgrademessage=_('repository will be better able to store files '
515 177 'beginning with a space or period'),
516 178 fromdefault=True,
517 179 fromconfig='dotencode' in newreporeqs))
518 180
519 181 if 'generaldelta' not in repo.requirements:
520 182 improvements.append(upgradeimprovement(
521 183 name='generaldelta',
522 184 type=deficiency,
523 185 description=_('deltas within internal storage are unable to '
524 186 'choose optimal revisions; repository is larger and '
525 187 'slower than it could be; interaction with other '
526 188 'repositories may require extra network and CPU '
527 189 'resources, making "hg push" and "hg pull" slower'),
528 190 upgrademessage=_('repository storage will be able to create '
529 191 'optimal deltas; new repository data will be '
530 192 'smaller and read times should decrease; '
531 193 'interacting with other repositories using this '
532 194 'storage model should require less network and '
533 195 'CPU resources, making "hg push" and "hg pull" '
534 196 'faster'),
535 197 fromdefault=True,
536 198 fromconfig='generaldelta' in newreporeqs))
537 199
538 200 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
539 201 # changelogs with deltas.
540 202 cl = repo.changelog
541 203 for rev in cl:
542 204 chainbase = cl.chainbase(rev)
543 205 if chainbase != rev:
544 206 improvements.append(upgradeimprovement(
545 207 name='removecldeltachain',
546 208 type=deficiency,
547 209 description=_('changelog storage is using deltas instead of '
548 210 'raw entries; changelog reading and any '
549 211 'operation relying on changelog data are slower '
550 212 'than they could be'),
551 213 upgrademessage=_('changelog storage will be reformated to '
552 214 'store raw entries; changelog reading will be '
553 215 'faster; changelog size may be reduced'),
554 216 fromdefault=True,
555 217 fromconfig=True))
556 218 break
557 219
558 220 # Now for the optimizations.
559 221
560 222 # These are unconditionally added. There is logic later that figures out
561 223 # which ones to apply.
562 224
563 225 improvements.append(upgradeimprovement(
564 226 name='redeltaparent',
565 227 type=optimisation,
566 228 description=_('deltas within internal storage will be recalculated to '
567 229 'choose an optimal base revision where this was not '
568 230 'already done; the size of the repository may shrink and '
569 231 'various operations may become faster; the first time '
570 232 'this optimization is performed could slow down upgrade '
571 233 'execution considerably; subsequent invocations should '
572 234 'not run noticeably slower'),
573 235 upgrademessage=_('deltas within internal storage will choose a new '
574 236 'base revision if needed')))
575 237
576 238 improvements.append(upgradeimprovement(
577 239 name='redeltamultibase',
578 240 type=optimisation,
579 241 description=_('deltas within internal storage will be recalculated '
580 242 'against multiple base revision and the smallest '
581 243 'difference will be used; the size of the repository may '
582 244 'shrink significantly when there are many merges; this '
583 245 'optimization will slow down execution in proportion to '
584 246 'the number of merges in the repository and the amount '
585 247 'of files in the repository; this slow down should not '
586 248 'be significant unless there are tens of thousands of '
587 249 'files and thousands of merges'),
588 250 upgrademessage=_('deltas within internal storage will choose an '
589 251 'optimal delta by computing deltas against multiple '
590 252 'parents; may slow down execution time '
591 253 'significantly')))
592 254
593 255 improvements.append(upgradeimprovement(
594 256 name='redeltaall',
595 257 type=optimisation,
596 258 description=_('deltas within internal storage will always be '
597 259 'recalculated without reusing prior deltas; this will '
598 260 'likely make execution run several times slower; this '
599 261 'optimization is typically not needed'),
600 262 upgrademessage=_('deltas within internal storage will be fully '
601 263 'recomputed; this will likely drastically slow down '
602 264 'execution time')))
603 265
604 266 return improvements
605 267
606 268 def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
607 269 optimize):
608 270 """Determine upgrade actions that will be performed.
609 271
610 272 Given a list of improvements as returned by ``upgradefindimprovements``,
611 273 determine the list of upgrade actions that will be performed.
612 274
613 275 The role of this function is to filter improvements if needed, apply
614 276 recommended optimizations from the improvements list that make sense,
615 277 etc.
616 278
617 279 Returns a list of action names.
618 280 """
619 281 newactions = []
620 282
621 283 knownreqs = upgradesupporteddestrequirements(repo)
622 284
623 285 for i in improvements:
624 286 name = i.name
625 287
626 288 # If the action is a requirement that doesn't show up in the
627 289 # destination requirements, prune the action.
628 290 if name in knownreqs and name not in destreqs:
629 291 continue
630 292
631 293 if i.type == deficiency:
632 294 newactions.append(name)
633 295
634 296 newactions.extend(o for o in sorted(optimize) if o not in newactions)
635 297
636 298 # FUTURE consider adding some optimizations here for certain transitions.
637 299 # e.g. adding generaldelta could schedule parent redeltas.
638 300
639 301 return newactions
640 302
641 303 def _revlogfrompath(repo, path):
642 304 """Obtain a revlog from a repo path.
643 305
644 306 An instance of the appropriate class is returned.
645 307 """
646 308 if path == '00changelog.i':
647 309 return changelog.changelog(repo.svfs)
648 310 elif path.endswith('00manifest.i'):
649 311 mandir = path[:-len('00manifest.i')]
650 312 return manifest.manifestrevlog(repo.svfs, dir=mandir)
651 313 else:
652 314 # Filelogs don't do anything special with settings. So we can use a
653 315 # vanilla revlog.
654 316 return revlog.revlog(repo.svfs, path)
655 317
656 318 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
657 319 """Copy revlogs between 2 repos."""
658 320 revcount = 0
659 321 srcsize = 0
660 322 srcrawsize = 0
661 323 dstsize = 0
662 324 fcount = 0
663 325 frevcount = 0
664 326 fsrcsize = 0
665 327 frawsize = 0
666 328 fdstsize = 0
667 329 mcount = 0
668 330 mrevcount = 0
669 331 msrcsize = 0
670 332 mrawsize = 0
671 333 mdstsize = 0
672 334 crevcount = 0
673 335 csrcsize = 0
674 336 crawsize = 0
675 337 cdstsize = 0
676 338
677 339 # Perform a pass to collect metadata. This validates we can open all
678 340 # source files and allows a unified progress bar to be displayed.
679 341 for unencoded, encoded, size in srcrepo.store.walk():
680 342 if unencoded.endswith('.d'):
681 343 continue
682 344
683 345 rl = _revlogfrompath(srcrepo, unencoded)
684 346 revcount += len(rl)
685 347
686 348 datasize = 0
687 349 rawsize = 0
688 350 idx = rl.index
689 351 for rev in rl:
690 352 e = idx[rev]
691 353 datasize += e[1]
692 354 rawsize += e[2]
693 355
694 356 srcsize += datasize
695 357 srcrawsize += rawsize
696 358
697 359 # This is for the separate progress bars.
698 360 if isinstance(rl, changelog.changelog):
699 361 crevcount += len(rl)
700 362 csrcsize += datasize
701 363 crawsize += rawsize
702 364 elif isinstance(rl, manifest.manifestrevlog):
703 365 mcount += 1
704 366 mrevcount += len(rl)
705 367 msrcsize += datasize
706 368 mrawsize += rawsize
707 369 elif isinstance(rl, revlog.revlog):
708 370 fcount += 1
709 371 frevcount += len(rl)
710 372 fsrcsize += datasize
711 373 frawsize += rawsize
712 374
713 375 if not revcount:
714 376 return
715 377
716 378 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
717 379 '%d in changelog)\n') %
718 380 (revcount, frevcount, mrevcount, crevcount))
719 381 ui.write(_('migrating %s in store; %s tracked data\n') % (
720 382 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
721 383
722 384 # Used to keep track of progress.
723 385 progress = []
724 386 def oncopiedrevision(rl, rev, node):
725 387 progress[1] += 1
726 388 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
727 389
728 390 # Do the actual copying.
729 391 # FUTURE this operation can be farmed off to worker processes.
730 392 seen = set()
731 393 for unencoded, encoded, size in srcrepo.store.walk():
732 394 if unencoded.endswith('.d'):
733 395 continue
734 396
735 397 oldrl = _revlogfrompath(srcrepo, unencoded)
736 398 newrl = _revlogfrompath(dstrepo, unencoded)
737 399
738 400 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
739 401 ui.write(_('finished migrating %d manifest revisions across %d '
740 402 'manifests; change in size: %s\n') %
741 403 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
742 404
743 405 ui.write(_('migrating changelog containing %d revisions '
744 406 '(%s in store; %s tracked data)\n') %
745 407 (crevcount, util.bytecount(csrcsize),
746 408 util.bytecount(crawsize)))
747 409 seen.add('c')
748 410 progress[:] = [_('changelog revisions'), 0, crevcount]
749 411 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
750 412 ui.write(_('finished migrating %d filelog revisions across %d '
751 413 'filelogs; change in size: %s\n') %
752 414 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
753 415
754 416 ui.write(_('migrating %d manifests containing %d revisions '
755 417 '(%s in store; %s tracked data)\n') %
756 418 (mcount, mrevcount, util.bytecount(msrcsize),
757 419 util.bytecount(mrawsize)))
758 420 seen.add('m')
759 421 progress[:] = [_('manifest revisions'), 0, mrevcount]
760 422 elif 'f' not in seen:
761 423 ui.write(_('migrating %d filelogs containing %d revisions '
762 424 '(%s in store; %s tracked data)\n') %
763 425 (fcount, frevcount, util.bytecount(fsrcsize),
764 426 util.bytecount(frawsize)))
765 427 seen.add('f')
766 428 progress[:] = [_('file revisions'), 0, frevcount]
767 429
768 430 ui.progress(progress[0], progress[1], total=progress[2])
769 431
770 432 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
771 433 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
772 434 deltareuse=deltareuse,
773 435 aggressivemergedeltas=aggressivemergedeltas)
774 436
775 437 datasize = 0
776 438 idx = newrl.index
777 439 for rev in newrl:
778 440 datasize += idx[rev][1]
779 441
780 442 dstsize += datasize
781 443
782 444 if isinstance(newrl, changelog.changelog):
783 445 cdstsize += datasize
784 446 elif isinstance(newrl, manifest.manifestrevlog):
785 447 mdstsize += datasize
786 448 else:
787 449 fdstsize += datasize
788 450
789 451 ui.progress(progress[0], None)
790 452
791 453 ui.write(_('finished migrating %d changelog revisions; change in size: '
792 454 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
793 455
794 456 ui.write(_('finished migrating %d total revisions; total change in store '
795 457 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
796 458
797 459 def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
798 460 """Determine whether to copy a store file during upgrade.
799 461
800 462 This function is called when migrating store files from ``srcrepo`` to
801 463 ``dstrepo`` as part of upgrading a repository.
802 464
803 465 Args:
804 466 srcrepo: repo we are copying from
805 467 dstrepo: repo we are copying to
806 468 requirements: set of requirements for ``dstrepo``
807 469 path: store file being examined
808 470 mode: the ``ST_MODE`` file type of ``path``
809 471 st: ``stat`` data structure for ``path``
810 472
811 473 Function should return ``True`` if the file is to be copied.
812 474 """
813 475 # Skip revlogs.
814 476 if path.endswith(('.i', '.d')):
815 477 return False
816 478 # Skip transaction related files.
817 479 if path.startswith('undo'):
818 480 return False
819 481 # Only copy regular files.
820 482 if mode != stat.S_IFREG:
821 483 return False
822 484 # Skip other skipped files.
823 485 if path in ('lock', 'fncache'):
824 486 return False
825 487
826 488 return True
827 489
828 490 def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
829 491 """Hook point for extensions to perform additional actions during upgrade.
830 492
831 493 This function is called after revlogs and store files have been copied but
832 494 before the new store is swapped into the original location.
833 495 """
834 496
835 497 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
836 498 """Do the low-level work of upgrading a repository.
837 499
838 500 The upgrade is effectively performed as a copy between a source
839 501 repository and a temporary destination repository.
840 502
841 503 The source repository is unmodified for as long as possible so the
842 504 upgrade can abort at any time without causing loss of service for
843 505 readers and without corrupting the source repository.
844 506 """
845 507 assert srcrepo.currentwlock()
846 508 assert dstrepo.currentwlock()
847 509
848 510 ui.write(_('(it is safe to interrupt this process any time before '
849 511 'data migration completes)\n'))
850 512
851 513 if 'redeltaall' in actions:
852 514 deltareuse = revlog.revlog.DELTAREUSENEVER
853 515 elif 'redeltaparent' in actions:
854 516 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
855 517 elif 'redeltamultibase' in actions:
856 518 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
857 519 else:
858 520 deltareuse = revlog.revlog.DELTAREUSEALWAYS
859 521
860 522 with dstrepo.transaction('upgrade') as tr:
861 523 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
862 524 'redeltamultibase' in actions)
863 525
864 526 # Now copy other files in the store directory.
865 527 for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
866 528 if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
867 529 p, kind, st):
868 530 continue
869 531
870 532 srcrepo.ui.write(_('copying %s\n') % p)
871 533 src = srcrepo.store.vfs.join(p)
872 534 dst = dstrepo.store.vfs.join(p)
873 535 util.copyfile(src, dst, copystat=True)
874 536
875 537 _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)
876 538
877 539 ui.write(_('data fully migrated to temporary repository\n'))
878 540
879 541 backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
880 542 backupvfs = vfsmod.vfs(backuppath)
881 543
882 544 # Make a backup of requires file first, as it is the first to be modified.
883 545 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
884 546
885 547 # We install an arbitrary requirement that clients must not support
886 548 # as a mechanism to lock out new clients during the data swap. This is
887 549 # better than allowing a client to continue while the repository is in
888 550 # an inconsistent state.
889 551 ui.write(_('marking source repository as being upgraded; clients will be '
890 552 'unable to read from repository\n'))
891 553 scmutil.writerequires(srcrepo.vfs,
892 554 srcrepo.requirements | set(['upgradeinprogress']))
893 555
894 556 ui.write(_('starting in-place swap of repository data\n'))
895 557 ui.write(_('replaced files will be backed up at %s\n') %
896 558 backuppath)
897 559
898 560 # Now swap in the new store directory. Doing it as a rename should make
899 561 # the operation nearly instantaneous and atomic (at least in well-behaved
900 562 # environments).
901 563 ui.write(_('replacing store...\n'))
902 564 tstart = util.timer()
903 565 util.rename(srcrepo.spath, backupvfs.join('store'))
904 566 util.rename(dstrepo.spath, srcrepo.spath)
905 567 elapsed = util.timer() - tstart
906 568 ui.write(_('store replacement complete; repository was inconsistent for '
907 569 '%0.1fs\n') % elapsed)
908 570
909 571 # We first write the requirements file. Any new requirements will lock
910 572 # out legacy clients.
911 573 ui.write(_('finalizing requirements file and making repository readable '
912 574 'again\n'))
913 575 scmutil.writerequires(srcrepo.vfs, requirements)
914 576
915 577 # The lock file from the old store won't be removed because nothing has a
916 578 # reference to its new location. So clean it up manually. Alternatively, we
917 579 # could update srcrepo.svfs and other variables to point to the new
918 580 # location. This is simpler.
919 581 backupvfs.unlink('store/lock')
920 582
921 583 return backuppath
922 584
923 585 def upgraderepo(ui, repo, run=False, optimize=None):
924 586 """Upgrade a repository in place."""
925 587 # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
926 588 from . import localrepo
927 589
928 590 optimize = set(optimize or [])
929 591 repo = repo.unfiltered()
930 592
931 593 # Ensure the repository can be upgraded.
932 594 missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
933 595 if missingreqs:
934 596 raise error.Abort(_('cannot upgrade repository; requirement '
935 597 'missing: %s') % _(', ').join(sorted(missingreqs)))
936 598
937 599 blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
938 600 if blockedreqs:
939 601 raise error.Abort(_('cannot upgrade repository; unsupported source '
940 602 'requirement: %s') %
941 603 _(', ').join(sorted(blockedreqs)))
942 604
943 605 # FUTURE there is potentially a need to control the wanted requirements via
944 606 # command arguments or via an extension hook point.
945 607 newreqs = localrepo.newreporequirements(repo)
946 608
947 609 noremovereqs = (repo.requirements - newreqs -
948 610 upgradesupportremovedrequirements(repo))
949 611 if noremovereqs:
950 612 raise error.Abort(_('cannot upgrade repository; requirement would be '
951 613 'removed: %s') % _(', ').join(sorted(noremovereqs)))
952 614
953 615 noaddreqs = (newreqs - repo.requirements -
954 616 upgradeallowednewrequirements(repo))
955 617 if noaddreqs:
956 618 raise error.Abort(_('cannot upgrade repository; do not support adding '
957 619 'requirement: %s') %
958 620 _(', ').join(sorted(noaddreqs)))
959 621
960 622 unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
961 623 if unsupportedreqs:
962 624 raise error.Abort(_('cannot upgrade repository; do not support '
963 625 'destination requirement: %s') %
964 626 _(', ').join(sorted(unsupportedreqs)))
965 627
966 628 # Find and validate all improvements that can be made.
967 629 improvements = upgradefindimprovements(repo)
968 630 for i in improvements:
969 631 if i.type not in (deficiency, optimisation):
970 632 raise error.Abort(_('unexpected improvement type %s for %s') % (
971 633 i.type, i.name))
972 634
973 635 # Validate arguments.
974 636 unknownoptimize = optimize - set(i.name for i in improvements
975 637 if i.type == optimisation)
976 638 if unknownoptimize:
977 639 raise error.Abort(_('unknown optimization action requested: %s') %
978 640 ', '.join(sorted(unknownoptimize)),
979 641 hint=_('run without arguments to see valid '
980 642 'optimizations'))
981 643
982 644 actions = upgradedetermineactions(repo, improvements, repo.requirements,
983 645 newreqs, optimize)
984 646
985 647 def printrequirements():
986 648 ui.write(_('requirements\n'))
987 649 ui.write(_(' preserved: %s\n') %
988 650 _(', ').join(sorted(newreqs & repo.requirements)))
989 651
990 652 if repo.requirements - newreqs:
991 653 ui.write(_(' removed: %s\n') %
992 654 _(', ').join(sorted(repo.requirements - newreqs)))
993 655
994 656 if newreqs - repo.requirements:
995 657 ui.write(_(' added: %s\n') %
996 658 _(', ').join(sorted(newreqs - repo.requirements)))
997 659
998 660 ui.write('\n')
999 661
1000 662 def printupgradeactions():
1001 663 for action in actions:
1002 664 for i in improvements:
1003 665 if i.name == action:
1004 666 ui.write('%s\n %s\n\n' %
1005 667 (i.name, i.upgrademessage))
1006 668
1007 669 if not run:
1008 670 fromdefault = []
1009 671 fromconfig = []
1010 672 optimizations = []
1011 673
1012 674 for i in improvements:
1013 675 assert i.type in (deficiency, optimisation)
1014 676 if i.type == deficiency:
1015 677 if i.fromdefault:
1016 678 fromdefault.append(i)
1017 679 if i.fromconfig:
1018 680 fromconfig.append(i)
1019 681 else:
1020 682 optimizations.append(i)
1021 683
1022 684 if fromdefault or fromconfig:
1023 685 fromconfignames = set(x.name for x in fromconfig)
1024 686 onlydefault = [i for i in fromdefault
1025 687 if i.name not in fromconfignames]
1026 688
1027 689 if fromconfig:
1028 690 ui.write(_('repository lacks features recommended by '
1029 691 'current config options:\n\n'))
1030 692 for i in fromconfig:
1031 693 ui.write('%s\n %s\n\n' % (i.name, i.description))
1032 694
1033 695 if onlydefault:
1034 696 ui.write(_('repository lacks features used by the default '
1035 697 'config options:\n\n'))
1036 698 for i in onlydefault:
1037 699 ui.write('%s\n %s\n\n' % (i.name, i.description))
1038 700
1039 701 ui.write('\n')
1040 702 else:
1041 703 ui.write(_('(no feature deficiencies found in existing '
1042 704 'repository)\n'))
1043 705
1044 706 ui.write(_('performing an upgrade with "--run" will make the following '
1045 707 'changes:\n\n'))
1046 708
1047 709 printrequirements()
1048 710 printupgradeactions()
1049 711
1050 712 unusedoptimize = [i for i in improvements
1051 713 if i.name not in actions and i.type == optimisation]
1052 714 if unusedoptimize:
1053 715 ui.write(_('additional optimizations are available by specifying '
1054 716 '"--optimize <name>":\n\n'))
1055 717 for i in unusedoptimize:
1056 718 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
1057 719 return
1058 720
1059 721 # Else we're in the run=true case.
1060 722 ui.write(_('upgrade will perform the following actions:\n\n'))
1061 723 printrequirements()
1062 724 printupgradeactions()
1063 725
1064 726 ui.write(_('beginning upgrade...\n'))
1065 727 with repo.wlock():
1066 728 with repo.lock():
1067 729 ui.write(_('repository locked and read-only\n'))
1068 730 # Our strategy for upgrading the repository is to create a new,
1069 731 # temporary repository, write data to it, then do a swap of the
1070 732 # data. There are less heavyweight ways to do this, but it is easier
1071 733 # to create a new repo object than to instantiate all the components
1072 734 # (like the store) separately.
1073 735 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
1074 736 backuppath = None
1075 737 try:
1076 738 ui.write(_('creating temporary repository to stage migrated '
1077 739 'data: %s\n') % tmppath)
1078 740 dstrepo = localrepo.localrepository(repo.baseui,
1079 741 path=tmppath,
1080 742 create=True)
1081 743
1082 744 with dstrepo.wlock():
1083 745 with dstrepo.lock():
1084 746 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
1085 747 actions)
1086 748
1087 749 finally:
1088 750 ui.write(_('removing temporary repository %s\n') % tmppath)
1089 751 repo.vfs.rmtree(tmppath, forcibly=True)
1090 752
1091 753 if backuppath:
1092 754 ui.warn(_('copy of old repository backed up at %s\n') %
1093 755 backuppath)
1094 756 ui.warn(_('the old repository will not be deleted; remove '
1095 757 'it to free up disk space once the upgraded '
1096 758 'repository is verified\n'))
General Comments 0
You need to be logged in to leave comments. Login now