Show More
@@ -1,3496 +1,3499 | |||
|
1 | 1 | # debugcommands.py - command processing for debug* commands |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2016 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import codecs |
|
11 | 11 | import collections |
|
12 | 12 | import difflib |
|
13 | 13 | import errno |
|
14 | 14 | import operator |
|
15 | 15 | import os |
|
16 | 16 | import random |
|
17 | 17 | import re |
|
18 | 18 | import socket |
|
19 | 19 | import ssl |
|
20 | 20 | import stat |
|
21 | 21 | import string |
|
22 | 22 | import subprocess |
|
23 | 23 | import sys |
|
24 | 24 | import time |
|
25 | 25 | |
|
26 | 26 | from .i18n import _ |
|
27 | 27 | from .node import ( |
|
28 | 28 | bin, |
|
29 | 29 | hex, |
|
30 | 30 | nullhex, |
|
31 | 31 | nullid, |
|
32 | 32 | nullrev, |
|
33 | 33 | short, |
|
34 | 34 | ) |
|
35 | 35 | from . import ( |
|
36 | 36 | bundle2, |
|
37 | 37 | changegroup, |
|
38 | 38 | cmdutil, |
|
39 | 39 | color, |
|
40 | 40 | context, |
|
41 | 41 | copies, |
|
42 | 42 | dagparser, |
|
43 | 43 | encoding, |
|
44 | 44 | error, |
|
45 | 45 | exchange, |
|
46 | 46 | extensions, |
|
47 | 47 | filemerge, |
|
48 | 48 | filesetlang, |
|
49 | 49 | formatter, |
|
50 | 50 | hg, |
|
51 | 51 | httppeer, |
|
52 | 52 | localrepo, |
|
53 | 53 | lock as lockmod, |
|
54 | 54 | logcmdutil, |
|
55 | 55 | merge as mergemod, |
|
56 | 56 | obsolete, |
|
57 | 57 | obsutil, |
|
58 | 58 | phases, |
|
59 | 59 | policy, |
|
60 | 60 | pvec, |
|
61 | 61 | pycompat, |
|
62 | 62 | registrar, |
|
63 | 63 | repair, |
|
64 | 64 | revlog, |
|
65 | 65 | revset, |
|
66 | 66 | revsetlang, |
|
67 | 67 | scmutil, |
|
68 | 68 | setdiscovery, |
|
69 | 69 | simplemerge, |
|
70 | 70 | sshpeer, |
|
71 | 71 | sslutil, |
|
72 | 72 | streamclone, |
|
73 | 73 | templater, |
|
74 | 74 | treediscovery, |
|
75 | 75 | upgrade, |
|
76 | 76 | url as urlmod, |
|
77 | 77 | util, |
|
78 | 78 | vfs as vfsmod, |
|
79 | 79 | wireprotoframing, |
|
80 | 80 | wireprotoserver, |
|
81 | 81 | wireprotov2peer, |
|
82 | 82 | ) |
|
83 | 83 | from .utils import ( |
|
84 | 84 | cborutil, |
|
85 | 85 | compression, |
|
86 | 86 | dateutil, |
|
87 | 87 | procutil, |
|
88 | 88 | stringutil, |
|
89 | 89 | ) |
|
90 | 90 | |
|
91 | 91 | from .revlogutils import ( |
|
92 | 92 | deltas as deltautil |
|
93 | 93 | ) |
|
94 | 94 | |
|
# convenience alias: release a sequence of locks (see lockmod.release)
release = lockmod.release

# command table for this module; each @command decorator below registers
# one debug* command into it
command = registrar.command()
|
@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    nargs = len(args)
    if nargs not in (2, 3):
        raise error.Abort(_('either two or three arguments required'))
    if nargs == 3:
        # an explicit index file was given: open it directly, no repo needed
        index, rev1, rev2 = args
        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = r.lookup
    else:
        # two arguments: operate on the current repository's changelog
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    ancestor = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (r.rev(ancestor), hex(ancestor)))
|
117 | 117 | |
|
@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    # open the bundle at the given path and replay it into the local repo
    fh = hg.openpath(ui, fname)
    bundle = exchange.readbundle(ui, fh, fname)
    bundle.apply(repo)
|
124 | 124 | |
|
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

     - "+n" is a linear run of n nodes based on the current default parent
     - "." is a single node based on the current default parent
     - "$" resets the default parent to null (implied at the start);
           otherwise the default parent is always the last node created
     - "<p" sets the default parent to the backref p
     - "*p" is a fork at parent p, which is a backref
     - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
     - "/p2" is a merge of the preceding node and p2
     - ":tag" defines a local tag for the preceding node
     - "@branch" sets the named branch for subsequent nodes
     - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

     - a number n, which references the node curr-n, where curr is the current
       node, or
     - the name of a local tag you placed earlier using ":tag", or
     - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    # refuse to run on a non-empty repo: node ids below are appended in
    # DAG order and must match the revision numbers assigned by commitctx
    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # determine number of revs in DAG (first parse pass, for progress total)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = ['%d' % i
                              for i in pycompat.xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []
    progress = ui.makeprogress(_('building'), unit=_('revisions'),
                               total=total)
    with progress, repo.wlock(), repo.lock(), repo.transaction("builddag"):
        at = -1                 # rev number of the last node committed
        atbranch = 'default'    # branch applied to subsequently created nodes
        nodeids = []            # nodeids[rev] -> node hash, for backrefs
        id = 0
        progress.update(id)
        # second parse pass: actually create the commits
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # merge node: three-way merge the shared file so the
                        # result contains conflicts/changes from both parents
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    # touch this rev's own line so every rev changes the file
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    # same file rewritten at every rev
                    fn = "of"
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id

                if new_file:
                    # a brand-new file per rev; merges also carry over the
                    # "nf*" files introduced on the second parent's side
                    fn = "nf%i" % id
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    # memctx callback: supply content for the files above
                    if path in filecontent:
                        return context.memfilectx(repo, cx, path,
                                                  filecontent[path])
                    return None

                # translate parent backrefs into node hashes
                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                # ":tag" element: remember a local tag for the node
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                # "@branch" element: switch branch for subsequent nodes
                ui.note(('branch %s\n' % data))
                atbranch = data
            progress.update(id)

        if tags:
            repo.vfs.write("localtags", "".join(tags))
|
272 | 272 | |
|
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """print the contents of changegroup 'gen'

    With 'all' set, every delta of every chunk group (changelog, manifest,
    filelogs) is listed with its parents and delta base; otherwise only the
    changelog node hashes are printed.  'indent' prefixes each output line
    (used when nested inside bundle2 part output).
    """
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            # dump the deltas of the current chunk group under heading 'named'
            ui.write("\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write("%s%s %s %s %s %s %d\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))

        # the unbundler is consumed in order: changelog, manifest, then one
        # group per filelog until the empty-header sentinel ({})
        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        chunkdata = gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write("%s%s\n" % (indent_string, hex(node)))
|
301 | 301 | |
|
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = ' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # markers encoded with a format this client does not know:
        # report it instead of aborting, since this is a debug dump
        msg = "%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = "%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        # reuse the debugobsolete formatter so output matches that command
        fm = ui.formatter('debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()
|
324 | 324 | |
|
def _debugphaseheads(ui, data, indent=0):
    """decode and print the phase heads contained in 'data'"""
    pad = ' ' * indent
    byphase = phases.binarydecode(data)
    for phase in phases.allphases:
        phasename = phases.phasenames[phase]
        for head in byphase[phase]:
            ui.write(pad)
            ui.write('%s %s\n' % (hex(head), phasename))
|
333 | 333 | |
|
def _quasirepr(thing):
    """render 'thing' repr-like, with mapping keys sorted for stable output"""
    mappingtypes = (dict, util.sortdict, collections.OrderedDict)
    if not isinstance(thing, mappingtypes):
        return pycompat.bytestr(repr(thing))
    items = (b'%s: %s' % (key, thing[key]) for key in sorted(thing))
    return '{%s}' % b', '.join(items)
|
339 | 339 | |
|
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
    # optional --part-type filter: only show the named part types
    parttypes = opts.get(r'part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = '%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        # known part payloads get expanded (indented) unless --quiet
        if part.type == 'changegroup':
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == 'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == 'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)
|
362 | 362 | |
|
@command('debugbundle',
        [('a', 'all', None, _('show all details')),
         ('', 'part-type', [], _('show only the named part type')),
         ('', 'spec', None, _('print the bundlespec of the bundle'))],
        _('FILE'),
        norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            # --spec: only print the bundlespec string, nothing else
            spec = exchange.getbundlespec(ui, f)
            ui.write('%s\n' % spec)
            return

        # dispatch on bundle format: bundle2 vs plain changegroup
        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
|
381 | 381 | |
|
@command('debugcapabilities',
        [], _('PATH'),
        norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    caps = peer.capabilities()
    ui.write(('Main capabilities:\n'))
    for c in sorted(caps):
        ui.write(('  %s\n') % c)
    # bundle2 capabilities are advertised separately, as key/value lists
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.write(('Bundle2 capabilities:\n'))
        for key, values in sorted(b2caps.iteritems()):
            ui.write(('  %s\n') % key)
            for v in values:
                ui.write(('    %s\n') % v)
|
400 | 400 | |
|
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate

    Cross-checks every dirstate entry against the manifests of the working
    directory's parents, warning about each inconsistency, and aborts if
    any were found.
    """
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        # 'n' (normal) and 'r' (removed) entries must exist in parent1
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        # 'a' (added) entries must NOT already exist in parent1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        # 'm' (merged) entries must come from at least one parent
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    # reverse check: every file in parent1 must be tracked
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # NOTE: use a local name that does not shadow the imported 'error'
        # module -- the original bound the message to 'error', making the
        # following 'error.Abort' an attribute lookup on the message string
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
|
428 | 428 | |
|
@command('debugcolor',
        [('', 'style', None, _('show all configured styles'))],
        'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.write(('color mode: %s\n') % stringutil.pprint(ui._colormode))
    # --style lists configured style labels; the default lists raw colors
    if not opts.get(r'style'):
        return _debugdisplaycolor(ui)
    return _debugdisplaystyle(ui)
|
439 | 439 | |
|
def _debugdisplaycolor(ui):
    """print every available color/effect name, rendered in that effect"""
    # work on a copy so the caller's ui styles are not clobbered
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        # in terminfo mode, custom colors/effects live in the [color]
        # config section under 'color.*' and 'terminfo.*' keys
        for k, v in ui.configitems('color'):
            if k.startswith('color.'):
                ui._styles[k] = k[6:]
            elif k.startswith('terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_('available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    items = sorted(ui._styles.items(),
                   key=lambda i: ('_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(('%s\n') % colorname, label=label)
|
457 | 457 | |
|
def _debugdisplaystyle(ui):
    """print every configured style label with its rendered effects"""
    ui.write(_('available style:\n'))
    if not ui._styles:
        return
    # pad every label to the widest one so the effect column lines up
    width = max(len(name) for name in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            ui.write(': ')
            ui.write(' ' * (max(0, width - len(label))))
            rendered = [ui.label(e, e) for e in effects.split()]
            ui.write(', '.join(rendered))
        ui.write('\n')
|
471 | 471 | |
|
@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    # stream bundles copy revlogs wholesale, so secret changesets cannot
    # be filtered out -- warn rather than silently leak them
    if phases.hassecret(repo):
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
|
489 | 489 | |
|
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
    ('b', 'branches', None, _('annotate with branch names')),
    ('', 'dots', None, _('use dots for runs')),
    ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get(r'spaces')
    dots = opts.get(r'dots')
    if file_:
        # explicit index file: walk its revlog; listed revs become labels
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            # yield ('n', (rev, parents)) nodes and ('l', (rev, label)) tags
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # no index file: walk the current repository's changelog
        cl = repo.changelog
        tags = opts.get(r'tags')
        branches = opts.get(r'branches')
        if tags:
            # map rev -> list of tag names so tags can be emitted inline
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    # emit an ('a', branch) annotation on branch changes
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
        ui.write("\n")
|
552 | 552 | |
|
@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    wholestore = opts.get('changelog') or opts.get('manifest') or \
                 opts.get('dir')
    if wholestore:
        # with -c/-m/--dir the sole positional argument is the revision
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    storage = cmdutil.openstorage(repo, 'debugdata', file_, opts)
    try:
        ui.write(storage.rawdata(storage.lookup(rev)))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)
|
568 | 568 | |
|
@command('debugdate',
    [('e', 'extended', None, _('try extended date formats'))],
    _('[-e] DATE [RANGE]'),
    norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # -e/--extended also tries the less common date formats
    if opts[r"extended"]:
        parsed = dateutil.parsedate(date, util.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    ui.write(("internal: %d %d\n") % parsed)
    ui.write(("standard: %s\n") % dateutil.datestr(parsed))
    if range:
        matcher = dateutil.matchdate(range)
        ui.write(("match: %s\n") % matcher(parsed[0]))
|
584 | 584 | |
|
@command('debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``:       revision number
    :``chainid``:   delta chain identifier (numbered by unique base)
    :``chainlen``:  delta chain length to this revision
    :``prevrev``:   previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``:  compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                    (new delta chains typically start at ratio 2.00)
    :``lindist``:   linear distance from base revision in delta chain to end
                    of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                    how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``:     total size of data read from the disk for a revision
                       (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``:  density of useful bytes in the data read from the disk
    :``srchunks``:  in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        # classify rev's delta and measure its chain; returns
        # (compsize, uncompsize, deltatype, chain, chainsize)
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # e[3] is the delta base; compare against parents (e[5]/e[6])
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            # without generaldelta, deltas are always against the previous rev
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    fm.plain('    rev  chain# chainlen     prev   delta       '
             'size    rawsize  chainsize     ratio   lindist extradist '
             'extraratio')
    if withsparseread:
        fm.plain('   readsize largestblk rddensity srchunks')
    fm.plain('\n')

    # chain bases are numbered in order of first appearance
    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        # on-disk span from the chain base through this rev's data
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            # chain of length 1: this rev is its own base
            prevrev = -1

        # guard the ratios against division by zero (empty revisions)
        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)
        if withsparseread:
            # simulate a sparse read of the chain and measure I/O efficiency
            readsize = 0
            largestblock = 0
            srchunks = 0

            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write('readsize largestblock readdensity srchunks',
                     '  %10d %10d %9.5f %8d',
                     readsize, largestblock, readdensity, srchunks,
                     readsize=readsize, largestblock=largestblock,
                     readdensity=readdensity, srchunks=srchunks)

        fm.plain('\n')

    fm.end()
|
736 | 736 | |
|
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime (DEPRECATED)')),
    ('', 'dates', True, _('display the saved mtime')),
    ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    # --nodates is the deprecated spelling of --no-dates
    nodates = not opts[r'dates']
    if opts.get(r'nodates') is not None:
        nodates = True
    datesort = opts.get(r'datesort')

    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    # each dirstate entry 'ent' is (state, mode, size, mtime)
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            timestr = 'unset               '
        elif nodates:
            timestr = 'set                 '
        else:
            timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
            timestr = encoding.strtolocal(timestr)
        # 0o20000 is the symlink bit in the recorded mode
        if ent[1] & 0o20000:
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
|
770 | 770 | |
|
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
    ('', 'nonheads', None,
     _('use old-style discovery with non-heads included')),
    ('', 'rev', [], _('restrict discovery to this set of revs')),
    ('', 'seed', '12323', _('specify the random seed use for discovery')),
    ] + cmdutil.remoteopts,
    _('[--rev REV] [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(int(opts['seed']))

    if opts.get('old'):
        def doit(pushedrevs, remoteheads, remote=remote):
            # legacy tree-walking discovery
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))

            clnode = repo.changelog.node
            common = repo.revs('heads(::%ln)', common)
            common = {clnode(r) for r in common}
            return common, hds
    else:
        def doit(pushedrevs, remoteheads, remote=remote):
            # modern set-based discovery
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            # second return value ("anything incoming?") is unused here;
            # named 'anyinc' to avoid shadowing the builtin any()
            common, anyinc, hds = setdiscovery.findcommonheads(
                ui, repo, remote, ancestorsof=nodes)
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts['rev']
    with util.timedcm('debug-discovery') as t:
        common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    common = set(common)
    rheads = set(hds)
    lheads = set(repo.heads())

    data = {}
    data['elapsed'] = t.elapsed
    data['nb-common'] = len(common)
    data['nb-common-local'] = len(common & lheads)
    data['nb-common-remote'] = len(common & rheads)
    data['nb-common-both'] = len(common & rheads & lheads)
    data['nb-local'] = len(lheads)
    data['nb-local-missing'] = data['nb-local'] - data['nb-common-local']
    data['nb-remote'] = len(rheads)
    data['nb-remote-unknown'] = data['nb-remote'] - data['nb-common-remote']
    data['nb-revs'] = len(repo.revs('all()'))
    data['nb-revs-common'] = len(repo.revs('::%ln', common))
    data['nb-revs-missing'] = data['nb-revs'] - data['nb-revs-common']

    # display discovery summary
    ui.write(("elapsed time:  %(elapsed)f seconds\n") % data)
    ui.write(("heads summary:\n"))
    ui.write(("  total common heads:  %(nb-common)9d\n") % data)
    ui.write(("    also local heads:  %(nb-common-local)9d\n") % data)
    ui.write(("    also remote heads: %(nb-common-remote)9d\n") % data)
    ui.write(("      both:            %(nb-common-both)9d\n") % data)
    ui.write(("  local heads:         %(nb-local)9d\n") % data)
    ui.write(("    common:            %(nb-common-local)9d\n") % data)
    ui.write(("    missing:           %(nb-local-missing)9d\n") % data)
    ui.write(("  remote heads:        %(nb-remote)9d\n") % data)
    ui.write(("    common:            %(nb-common-remote)9d\n") % data)
    ui.write(("    unknown:           %(nb-remote-unknown)9d\n") % data)
    ui.write(("local changesets:      %(nb-revs)9d\n") % data)
    ui.write(("  common:              %(nb-revs-common)9d\n") % data)
    ui.write(("  missing:             %(nb-revs-missing)9d\n") % data)

    if ui.verbose:
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
|
861 | 861 | |
|
# read/write buffer size (4 KiB) used by debugdownload below
_chunksize = 4 << 10
|
863 | 863 | |
|
@command('debugdownload',
    [
        ('o', 'output', '', _('path')),
    ],
    optionalrepo=True)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config

    The resource at ``url`` is fetched through Mercurial's url handling
    (honoring proxy/auth configuration) and streamed in _chunksize pieces
    either to the ui or, with -o/--output, to the named file.
    """
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        dest = open(output, "wb", _chunksize)
    try:
        try:
            data = fh.read(_chunksize)
            while data:
                dest.write(data)
                data = fh.read(_chunksize)
        finally:
            # always release the response handle (was previously leaked)
            fh.close()
    finally:
        if output:
            dest.close()
|
885 | 885 | |
|
@command('debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    # one formatter item per loaded extension, sorted by name
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            # default verbosity: annotate the name with compatibility info
            fm.write('name', '%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        # the remaining fields are only rendered with --verbose (but are
        # still emitted as data for templated/JSON output)
        fm.condwrite(ui.verbose and extsource, 'source',
                 _('  location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_('  bundled: %s\n') % ['no', 'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _('  tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _('  bug reporting: %s\n'), extbuglink or "")

    fm.end()
|
931 | 931 | |
|
@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')),
     ('', 'all-files', False,
      _('test files from all revisions and working directory')),
     ('s', 'show-matcher', None,
      _('print internal representation of matcher')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME'))],
    _('[-r REV] [--all-files] [OPTION]... FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset
    fileset.symbols # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    # the expression tree is pushed through these stages in order; each
    # stage name can be selected with -p/--show-stage
    stages = [
        ('parsed', pycompat.identity),
        ('analyzed', filesetlang.analyze),
        ('optimized', filesetlang.optimize),
    ]
    stagenames = set(n for n, f in stages)

    showalways = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(filesetlang.prettyformat(tree), "\n")

    # collect the candidate file names the matcher will be tested against
    files = set()
    if opts['all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts['all_files'] or ctx.rev() is None:
        # working directory: include unknown and ignored files too
        wctx = repo[None]
        files.update(repo.dirstate.walk(scmutil.matchall(repo),
                                        subrepos=list(wctx.substate),
                                        unknown=True, ignored=True))
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(expr)
    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    # finally print every candidate file the fileset matches
    for f in sorted(files):
        if not m(f):
            continue
        ui.write("%s\n" % f)
|
998 | 998 | |
|
@command('debugformat',
         [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    # column width: widest variant name, but at least the header width
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len('format-variant'), maxvariantlength)

    def makeformatname(name):
        # pad the "name:" column to maxvariantlength
        return '%s:' + (' ' * (maxvariantlength - len(name)))

    fm = ui.formatter('debugformat', opts)
    if fm.isplain():
        def formatvalue(value):
            # strings pass through; booleans render as yes/no in plain mode
            if util.safehasattr(value, 'startswith'):
                return value
            if value:
                return 'yes'
            else:
                return 'no'
    else:
        formatvalue = pycompat.identity

    # header row ('config' and 'default' columns only with --verbose)
    fm.plain('format-variant')
    fm.plain(' ' * (maxvariantlength - len('format-variant')))
    fm.plain(' repo')
    if ui.verbose:
        fm.plain(' config default')
    fm.plain('\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # pick labels so mismatches between repo/config/default can be
        # color-highlighted
        if repovalue != configvalue:
            namelabel = 'formatvariant.name.mismatchconfig'
            repolabel = 'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = 'formatvariant.name.mismatchdefault'
            repolabel = 'formatvariant.repo.mismatchdefault'
        else:
            namelabel = 'formatvariant.name.uptodate'
            repolabel = 'formatvariant.repo.uptodate'

        fm.write('name', makeformatname(fv.name), fv.name,
                 label=namelabel)
        fm.write('repo', ' %3s', formatvalue(repovalue),
                 label=repolabel)
        if fv.default != configvalue:
            configlabel = 'formatvariant.config.special'
        else:
            configlabel = 'formatvariant.config.default'
        fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
                     label=configlabel)
        fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
                     label='formatvariant.default')
        fm.plain('\n')
    fm.end()
|
1060 | 1060 | |
|
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    # probe a set of filesystem capabilities at 'path', one result per line
    def yesno(probe):
        return 'yes' if probe else 'no'

    ui.write(('path: %s\n') % path)
    mountpoint = util.getfsmountpoint(path) or '(unknown)'
    ui.write(('mounted on: %s\n') % mountpoint)
    ui.write(('exec: %s\n') % yesno(util.checkexec(path)))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % yesno(util.checklink(path)))
    ui.write(('hardlink: %s\n') % yesno(util.checknlink(path)))
    # case sensitivity needs a scratch file; the probe can fail (e.g. on a
    # read-only filesystem), in which case '(unknown)' is reported
    casesensitive = '(unknown)'
    try:
        with pycompat.namedtempfile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = yesno(util.fscasesensitive(f.name))
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)
|
1077 | 1077 | |
|
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
    ('C', 'common', [], _('id of common node'), _('ID')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    # r'' keys: these become native-str keyword names for **args below
    args = {}
    if common:
        args[r'common'] = [bin(s) for s in common]
    if head:
        args[r'heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args[r'bundlecaps'] = None
    bundle = repo.getbundle('debug', **args)

    # map the user-facing --type name to the on-disk bundle header
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
|
1112 | 1112 | |
|
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None      # the path (file or parent dir) that matched
            ignoredata = None   # (ignorefile, lineno, line) for that match
            if nf != '.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    # not directly ignored: check each parent directory
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(_("%s is ignored because of "
                               "containing directory %s\n")
                             % (uipathfn(f), ignored))
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % uipathfn(f))
|
1155 | 1155 | |
|
@command('debugindex', cmdutil.debugrevlogopts + cmdutil.formatteropts,
         _('-c|-m|FILE'))
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindex', file_, opts)

    # --debug shows full 40-char hashes, otherwise short ones
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # measure the id column width from the first revision (12 if store empty)
    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break

    fm = ui.formatter('debugindex', opts)
    fm.plain(b'   rev linkrev %s %s p2\n' % (
        b'nodeid'.ljust(idlen),
        b'p1'.ljust(idlen)))

    # one row per revision: rev, linkrev, node id, both parent ids
    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', '%7d ', store.linkrev(rev))
        fm.write(b'node', '%s ', shortfn(node))
        fm.write(b'p1', '%s ', shortfn(parents[0]))
        fm.write(b'p2', '%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()
|
1191 | 1191 | |
|
@command('debugindexdot', cmdutil.debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    # emit one "parent -> child" edge per real parent; a null second
    # parent is omitted
    for rev in store:
        p1, p2 = store.parents(store.node(rev))
        ui.write("\t%d -> %d\n" % (store.rev(p1), rev))
        if p2 != nullid:
            ui.write("\t%d -> %d\n" % (store.rev(p2), rev))
    ui.write("}\n")
|
1206 | 1206 | |
|
@command('debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # touch the index so it is fully parsed before collecting stats
    repo.changelog.shortest(nullid, 1)
    index = repo.changelog.index
    if not util.safehasattr(index, 'stats'):
        # only the native (C) index implementation exposes stats()
        raise error.Abort(_('debugindexstats only works with native code'))
    stats = index.stats()
    for key in sorted(stats):
        ui.write('%s: %d\n' % (key, stats[key]))
|
1216 | 1216 | |
|
@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    # running count of detected problems; also the command's return code
    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable or _("unknown"))
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_('  TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_('  SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    # try importing the compiled extension modules that the active module
    # policy requires; a failed import is a real installation problem
    rustandc = policy.policy in ('rust+c', 'rust+c-allow')
    rustext = rustandc  # for now, that's the only case
    cext = policy.policy in ('c', 'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        err = None
        try:
            if cext:
                from .cext import (
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )
                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (
                    ancestor,
                    dirstate,
                )
                dir(ancestor), dir(dirstate) # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))
    re2 = 'missing'
    if util._re2:
        re2 = 'available'
    fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    # 'vi' is the fallback default, so distinguish "no editor configured"
    # from "configured editor not found"
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editorbin)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editorbin)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = stringutil.forcebytestr(e)
        problems += 1

    fm.condwrite(username, 'username',  _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    # let extensions contribute their own installation checks
    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems
|
1401 | 1401 | |
|
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('known'):
        raise error.Abort("known() not supported by target repository")
    # query all ids in one round trip, then print a 0/1 digit per id
    nodes = [bin(s) for s in ids]
    digits = ["1" if known else "0" for known in peer.known(nodes)]
    ui.write("%s\n" % "".join(digits))
|
1415 | 1415 | |
|
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # plain alias: all the work happens in debugnamecomplete
    debugnamecomplete(ui, repo, *args)
|
1420 | 1420 | |
|
@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)')),
          ('s', 'set-lock', None, _('set the store lock until stopped')),
          ('S', 'set-wlock', None,
           _('set the working state lock until stopped'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # --force-lock/--force-wlock: blindly delete the lock files without any
    # checks (hence "DANGEROUS" in the option help).
    if opts.get(r'force_lock'):
        repo.svfs.unlink('lock')
    if opts.get(r'force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
        return 0

    # --set-lock/--set-wlock: acquire the requested lock(s) non-blocking,
    # hold them until the user answers the prompt (or the process is
    # interrupted), then release them in the finally clause.
    locks = []
    try:
        if opts.get(r'set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_('wlock is already held'))
        if opts.get(r'set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_('lock is already held'))
        if len(locks):
            ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    # Default mode: report whether each lock is currently held.
    now = time.time()
    held = 0

    def report(vfs, name, method):
        # Probe by trying to take the lock non-blocking ourselves.
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # We got it, so nobody else holds it: immediately let go.
            l.release()
        else:
            # Somebody else holds the lock; describe the holder from the
            # lock file's metadata (mtime for age, contents for host:pid).
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = ('user %s, process %s, host %s'
                                  % (user or b'None', pid, host))
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                # ENOENT means the lock file vanished between the probe and
                # the stat: fall through and report it as free.
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    # Non-zero exit status when any lock is held.
    return held
|
1517 | 1517 | |
|
@command('debugmanifestfulltextcache', [
        ('', 'clear', False, _('clear the cache')),
        ('a', 'add', [], _('add the given manifest nodes to the cache'),
         _('NODE'))
    ], '')
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        # Reach into the manifest revlog's private fulltext cache; not all
        # storage backends have one, hence the AttributeError fallback.
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _("Current revlog implementation doesn't appear to have a "
                    "manifest fulltext cache\n")
            raise error.Abort(msg)

    if opts.get(r'clear'):
        # Clearing rewrites the on-disk cache file, so take the wlock.
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
            return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(e, hint="Check your manifest node id")
                manifest.read()  # stores revision in cache too
            return

    # No action options: dump the current cache contents.
    cache = getcache()
    if not len(cache):
        ui.write(_('cache empty\n'))
    else:
        ui.write(
            _('cache contains %d manifest entries, in order of most to '
              'least recent:\n') % (len(cache),))
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24   # 20 bytes nodeid, 4 bytes size
            ui.write(_('id: %s, size %s\n') % (
                hex(nodeid), util.bytecount(size)))
        ondisk = cache._opener.stat('manifestfulltextcache').st_size
        ui.write(
            _('total cache data size %s, on-disk %s\n') % (
                util.bytecount(totalsize), util.bytecount(ondisk))
        )
|
1573 | 1573 | |
|
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        # Render the null hash as the literal string 'null' for readability.
        if h == nullhex:
            return 'null'
        else:
            return h

    def printrecords(version):
        # Pretty-print the raw merge-state records of the given format
        # version (1 or 2); v1records/v2records come from the enclosing scope.
        ui.write(('* version %d records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                # 'L': node of the local (first) parent
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                # 'O': node of the other (second) parent
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                # 'm': merge driver name and its state, NUL-separated
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                # 'F'/'D'/'C': per-file merge records; fields are
                # NUL-separated. v1 lacks the "other node" field.
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write(('  ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write(('  other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                # 'f': extra key/value metadata for a file, alternating
                # key and value entries after the filename.
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                # 'l': merge labels (local, other, and optionally base)
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write(('  local: %s\n' % labels[0]))
                ui.write(('  other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write(('  base: %s\n' % labels[2]))
            else:
                # Unknown record type: dump it raw with NULs made visible.
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        # Known record types sort first, in 'LOml' order; everything else
        # sorts after, by record payload.
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    # Prefer v2 when both exist and agree; fall back to v1 otherwise.
    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
        if ui.verbose:
            printrecords(2)
|
1672 | 1672 | |
|
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for nsname, ns in repo.names.iteritems():
        if nsname != 'branches':
            names.update(ns.listnames(repo))
    for tag, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            names.add(tag)

    # With no arguments, a single empty prefix matches every name.
    prefixes = args or ['']
    completions = set()
    for prefix in prefixes:
        for candidate in names:
            if candidate.startswith(prefix):
                completions.add(candidate)
    ui.write('\n'.join(sorted(completions)))
    ui.write('\n')
|
1692 | 1692 | |
|
@command('debugobsolete',
        [('', 'flags', 0, _('markers flag')),
         ('', 'record-parents', False,
          _('record parent information for the precursor')),
         ('r', 'rev', [], _('display markers relevant to REV')),
         ('', 'exclusive', False, _('restrict display to markers only '
                                    'relevant to REV')),
         ('', 'index', False, _('display index of the marker')),
         ('', 'delete', [], _('delete markers specified by indices')),
        ] + cmdutil.commitopts2 + cmdutil.formatteropts,
         _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        # Parse a full hex node id into binary, aborting on anything else.
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    # --delete mode: remove markers by index and stop.
    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        # Deleting rewrites the obsstore outside any transaction, so refuse
        # to run while one is open.
        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        # Creation mode: record one marker precursor -> successors.
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = encoding.fromlocal(opts['user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    # Parents can only be recorded for changesets we have.
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot use --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata, ui=ui)
                tr.close()
            except ValueError as exc:
                # obsstore.create() validates its input with ValueError.
                raise error.Abort(_('bad obsmarker input: %s') %
                                  pycompat.bytestr(exc))
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # Display mode: list markers, optionally restricted to --rev.
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsutil.getmarkers(repo, nodes=nodes,
                                              exclusive=opts['exclusive']))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            # Indices are positions in the full marker list, so we must
            # iterate over everything and filter the display instead.
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
|
1809 | 1809 | |
|
@command('debugp1copies',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV]'))
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    # Default to the working directory context when no revision is given.
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    p1copies = ctx.p1copies()
    # One "source -> destination" line per recorded copy.
    for dst, src in p1copies.items():
        ui.write('%s -> %s\n' % (src, dst))
|
1820 | 1820 | |
|
@command('debugp2copies',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV]'))
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""
    # Fix: this function was named debugp1copies (copy-paste error), which
    # shadowed the module-level binding of the real debugp1copies above.
    # The registered command name ('debugp2copies') is unaffected.
    opts = pycompat.byteskwargs(opts)
    # Default to the working directory context when no revision is given.
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    for dst, src in ctx.p2copies().items():
        ui.write('%s -> %s\n' % (src, dst))
|
1831 | 1831 | |
|
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        # Return (files, dirs) completions for one path prefix, restricted
        # to dirstate entries whose status character is in 'acceptable'.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        # Prefixes outside the repository cannot match anything.
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        # The dirstate stores '/'-separated paths; translate the prefix on
        # platforms with a different separator, and translate back on output.
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts[r'full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                # Without --full, stop at the next path separator so the
                # completion extends one segment at a time.
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # Build the set of acceptable dirstate states from the filter options;
    # with no filter, accept normal, added and removed entries ('nmar').
    acceptable = ''
    if opts[r'normal']:
        acceptable += 'nm'
    if opts[r'added']:
        acceptable += 'a'
    if opts[r'removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')
|
1896 | 1896 | |
|
@command('debugpathcopies',
         cmdutil.walkopts,
         'hg debugpathcopies REV1 REV2 [FILE]',
         inferrepo=True)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    ctx1 = scmutil.revsingle(repo, rev1)
    ctx2 = scmutil.revsingle(repo, rev2)
    # File patterns are matched against the first revision's context.
    m = scmutil.match(ctx1, pats, opts)
    pathcopies = copies.pathcopies(ctx1, ctx2, m)
    # Emit copies sorted by destination for stable output.
    for dst, src in sorted(pathcopies.items()):
        ui.write('%s -> %s\n' % (src, dst))
|
1908 | 1908 | |
|
@command('debugpeer', [], _('PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging. Requires --debug to display
    # though.
    with ui.configoverride({('devel', 'debug.peer-request'): True}):
        peer = hg.peer(ui, {}, path)

    islocal = peer.local() is not None
    pushable = peer.canpush()

    ui.write(_('url: %s\n') % peer.url())
    ui.write(_('local: %s\n') % (_('yes') if islocal else _('no')))
    ui.write(_('pushable: %s\n') % (_('yes') if pushable else _('no')))
|
1927 | 1927 | |
|
@command('debugpickmergetool',
        [('r', 'rev', '', _('check for files in this revision'), _('REV')),
         ('', 'changedelete', None, _('emulate merging change and delete')),
        ] + cmdutil.walkopts + cmdutil.mergetoolopts,
        _('[PATTERN]...'),
        inferrepo=True)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    # --tool is applied the same way a real merge would apply it: via the
    # ui.forcemerge config override.
    overrides = {}
    if opts['tool']:
        overrides[('ui', 'forcemerge')] = opts['tool']
        ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))

    with ui.configoverride(overrides, 'debugmergepatterns'):
        # With -v, echo the high-priority sources (see docstring) so the
        # user can tell why merge-patterns matching may be bypassed.
        hgmerge = encoding.environ.get("HGMERGE")
        if hgmerge is not None:
            ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config("ui", "merge")
        if uimerge:
            ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get('rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts['changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                # Without --debug, capture _picktool's chatter in a buffer
                # so only the "FILE = MERGETOOL" lines are shown.
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(repo, ui, path,
                                                     fctx.isbinary(),
                                                     'l' in fctx.flags(),
                                                     changedelete)
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(('%s = %s\n') % (path, tool))
|
2006 | 2006 | |
|
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if not keyinfo:
        # Listing mode: dump every key/value pair in the namespace,
        # escaped and sorted by key.
        for k, v in sorted(target.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (stringutil.escapestr(k),
                                   stringutil.escapestr(v)))
    else:
        # Update mode: conditionally move KEY from OLD to NEW on the peer.
        key, old, new = keyinfo
        with target.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': namespace,
                'key': key,
                'old': old,
                'new': new,
            }).result()

        ui.status(pycompat.bytestr(r) + '\n')
        # Exit status 0 on success (truthy pushkey result).
        return not r
|
2034 | 2034 | |
|
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    # Compare the pvecs (parent vectors) of two revisions and print their
    # depths, hamming distance, and relation (=, >, <, or |).
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    # NOTE(review): if none of the four comparisons above holds, 'rel' is
    # unbound and the final write raises NameError; presumably the pvec
    # relations are exhaustive - confirm against the pvec module.
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))
|
2055 | 2055 | |
|
@command('debugrebuilddirstate|debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                             'the working copy parent')),
    ],
    _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        # None tells rebuild() to reset every file to the target manifest.
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get(r'minimal'):
            inmanifest = set(ctx.manifest().keys())
            indirstate = set(dirstate)
            manifestonly = inmanifest - indirstate
            dsonly = indirstate - inmanifest
            dsnotadded = {f for f in dsonly if dirstate[f] != 'a'}
            changedfiles = manifestonly | dsnotadded

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
|
2093 | 2093 | |
|
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # Pure delegation to the repair module, which owns fncache maintenance.
    repair.rebuildfncache(ui, repo)
|
2098 | 2098 | |
|
@command('debugrename',
    [('r', 'rev', '', _('revision to debug'), _('REV'))],
    _('[-r REV] [FILE]...'))
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    m = scmutil.match(ctx, pats, opts)
    # 'abspath' is the repo-relative path of each matched file in the
    # target revision (renamed from 'abs', which shadowed the builtin).
    for abspath in ctx.walk(m):
        fctx = ctx[abspath]
        # renamed() yields (source path, source filenode) for a rename,
        # or a false value when the file was not renamed at this node.
        o = fctx.filelog().renamed(fctx.filenode())
        rel = repo.pathto(abspath)
        if o:
            ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
        else:
            ui.write(_("%s not renamed\n") % rel)
|
2116 | 2116 | |
|
@command('debugrevlog', cmdutil.debugrevlogopts +
    [('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    # --dump: emit one raw line per revision and return without computing
    # the aggregate statistics below.
    if opts.get("dump"):
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start   end deltastart base   p1   p2"
                 " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # a delta parent of -1 means "full text"; treat the
                # revision as its own delta base for the table
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # maintain the running set of topological heads seen so far
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                # cumulative raw size over cumulative stored size so far
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    # decode the revlog version/feature flags from the version word
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append('inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks ways the "delta" are build
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about delta chain of each revs
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision; each list is [min, max, total] and is
    # updated through addsize() below
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        # fold `size` into the [min, max, total] accumulator `l`
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    # single pass over all revisions, classifying each stored entry by
    # how its delta was built and accumulating size statistics
    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # stored as a full snapshot (depth 0)
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            # stored as a delta; extend the delta chain bookkeeping
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                # intermediate snapshot: a delta whose base is itself
                # a snapshot (sparse-revlog feature)
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, '_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            # first byte of the stored chunk identifies the compression
            # engine (e.g. 'x' for zlib, 'u' for uncompressed)
            chunktype = bytes(segment[0:1])
        else:
            chunktype = 'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # NOTE(review): under Python 3 `/=` is true division, so these
    # averages may become floats; the '%d' formatting below truncates them
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    if numfull == 0:
        fullsize[2] = 0
    else:
        fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    # column-width templates; the %%%dd trick bakes the width of the
    # largest value into the format string so columns line up
    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        # return (value, percentage-of-total) for the pcfmtstr templates
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.write(('format : %d\n') % format)
    ui.write(('flags  : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions     : ') + fmt2 % numrevs)
    ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    # the 'revisions' header is intentionally repeated: the first section
    # breaks revisions down by parents, this one by storage form
    ui.write(('revisions     : ') + fmt2 % numrevs)
    ui.write(('    empty     : ') + fmt % pcfmt(numempty, numrevs))
    ui.write(('                   text  : ')
             + fmt % pcfmt(numemptytext, numemptytext + numemptydelta))
    ui.write(('                   delta : ')
             + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta))
    ui.write(('    snapshot  : ') + fmt % pcfmt(numfull + numsemi, numrevs))
    for depth in sorted(numsnapdepth):
        ui.write(('      lvl-%-3d :       ' % depth)
                 + fmt % pcfmt(numsnapdepth[depth], numrevs))
    ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write(('    snapshot  : ')
             + fmt % pcfmt(fulltotal + semitotal, totalsize))
    for depth in sorted(numsnapdepth):
        ui.write(('      lvl-%-3d :       ' % depth)
                 + fmt % pcfmt(snaptotal[depth], totalsize))
    ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        # render the one-byte compression marker as a label
        if chunktype == 'empty':
            return '    %s     : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return '    0x%s (%s)  : ' % (hex(chunktype), chunktype)
        else:
            return '    0x%s      : ' % hex(chunktype)

    ui.write('\n')
    ui.write(('chunks        : ') + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.write(('chunks size   : ') + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.write(('avg chain length  : ') + fmt % avgchainlen)
    ui.write(('max chain length  : ') + fmt % maxchainlen)
    ui.write(('max chain reach   : ') + fmt % maxchainspan)
    ui.write(('compression ratio : ') + fmt % compratio)

    # per-revision size details are only meaningful for revlogv1+
    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('inter-snapshot size (min/max/avg)    : %d / %d / %d\n')
                 % tuple(semisize))
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.write(('    level-%-3d (min/max/avg)          : %d / %d / %d\n')
                     % ((depth,) + tuple(snapsizedepth[depth])))
        ui.write(('delta size (min/max/avg)             : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev  : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write(('    where prev = p1  : ') + fmt2 % pcfmt(nump1prev,
                                                              numprev))
            ui.write(('    where prev = p2  : ') + fmt2 % pcfmt(nump2prev,
                                                              numprev))
            ui.write(('    other            : ') + fmt2 % pcfmt(numoprev,
                                                              numprev))
        if gdelta:
            ui.write(('deltas against p1    : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2    : ')
                     + fmt % pcfmt(nump2, numdeltas))
            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                               numdeltas))
|
2419 | 2419 | |
|
@command('debugrevlogindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlogindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    # --debug shows full 40-char hashes, otherwise abbreviated ones
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        # measure one rendered node id to size the header columns
        idlen = len(shortfn(r.node(i)))
        break

    # print the column header matching the chosen format/verbosity
    if format == 0:
        if ui.verbose:
            ui.write(("   rev    offset  length linkrev"
                     " %s %s p2\n") % ("nodeid".ljust(idlen),
                                       "p1".ljust(idlen)))
        else:
            ui.write(("   rev linkrev %s %s p2\n") % (
                "nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        if ui.verbose:
            ui.write(("   rev flag   offset   length     size   link     p1"
                      "     p2 %s\n") % "nodeid".rjust(idlen))
        else:
            ui.write(("   rev flag     size   link     p1     p2 %s\n") %
                     "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # fall back to null parents if the lookup fails (e.g. a
                # damaged index); keep dumping the rest of the entries
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
            else:
                ui.write("% 6d % 7d %s %s %s\n" % (
                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
                    shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    r.linkrev(i), pr[0], pr[1], shortfn(node)))
            else:
                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
                    shortfn(node)))
|
2484 | 2484 | |
|
@command('debugrevspec',
    [('', 'optimize', None,
      _('print parsed tree after optimizing (DEPRECATED)')),
     ('', 'show-revs', True, _('print list of result revisions (default)')),
     ('s', 'show-set', None, _('print internal representation of result set')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME')),
     ('', 'no-optimized', False, _('evaluate tree without optimization')),
     ('', 'verify-optimized', False, _('verify optimized result')),
     ],
    ('REVSPEC'))
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems('revsetalias')
    # the revset compilation pipeline: each stage transforms the tree
    # produced by the previous one
    stages = [
        ('parsed', lambda tree: tree),
        ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
                                                           ui.warn)),
        ('concatenated', revsetlang.foldconcat),
        ('analyzed', revsetlang.analyze),
        ('optimized', revsetlang.optimize),
    ]
    if opts['no_optimized']:
        stages = stages[:-1]
    if opts['verify_optimized'] and opts['no_optimized']:
        raise error.Abort(_('cannot use --verify-optimized with '
                            '--no-optimized'))
    stagenames = set(n for n, f in stages)

    # which stages to print: 'showalways' unconditionally, 'showchanged'
    # only when the stage actually modified the tree
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
        showchanged.update(['expanded', 'concatenated'])
    if opts['optimize']:
        showalways.add('optimized')
    if opts['show_stage'] and opts['optimize']:
        raise error.Abort(_('cannot use --optimize with --show-stage'))
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    # run the pipeline, keeping every intermediate tree for --verify-optimized
    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(revsetlang.prettyformat(tree), "\n")
            printedtree = tree

    if opts['verify_optimized']:
        # evaluate both the analyzed and the optimized trees and diff
        # the resulting revision sequences
        arevs = revset.makematcher(treebystage['analyzed'])(repo)
        brevs = revset.makematcher(treebystage['optimized'])(repo)
        if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
            ui.write(("* analyzed set:\n"), stringutil.prettyrepr(arevs), "\n")
            ui.write(("* optimized set:\n"), stringutil.prettyrepr(brevs), "\n")
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        # render a unified-diff-like view of the mismatching revisions
        ui.write(('--- analyzed\n'), label='diff.file_a')
        ui.write(('+++ optimized\n'), label='diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in (r'delete', r'replace'):
                for c in arevs[alo:ahi]:
                    ui.write('-%d\n' % c, label='diff.deleted')
            if tag in (r'insert', r'replace'):
                for c in brevs[blo:bhi]:
                    ui.write('+%d\n' % c, label='diff.inserted')
            if tag == r'equal':
                for c in arevs[alo:ahi]:
                    ui.write(' %d\n' % c)
        return 1

    # normal path: evaluate the final tree and print the matching revs
    func = revset.makematcher(tree)
    revs = func(repo)
    if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
        ui.write(("* set:\n"), stringutil.prettyrepr(revs), "\n")
    if not opts['show_revs']:
        return
    for c in revs:
        ui.write("%d\n" % c)
|
2587 | 2587 | |
|
@command('debugserve', [
    ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
    ('', 'logiofd', '', _('file descriptor to log server I/O to')),
    ('', 'logiofile', '', _('file to log server I/O to')),
], '')
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts['sshstdio']:
        raise error.Abort(_('only --sshstdio is currently supported'))

    # optional file handle to mirror the wire protocol I/O to
    logfh = None

    if opts['logiofd'] and opts['logiofile']:
        raise error.Abort(_('cannot use both --logiofd and --logiofile'))

    if opts['logiofd']:
        # Line buffered because output is line based.
        try:
            logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts['logiofd']), r'wb', 1)
    elif opts['logiofile']:
        logfh = open(opts['logiofile'], 'ab', 1)

    # serve the SSH wire protocol on this process's stdin/stdout
    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()
|
2624 | 2624 | |
|
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """
    # resolve both revisions up front; a missing REV2 defaults to 'null'
    p1 = scmutil.revsingle(repo, rev1).node()
    p2 = scmutil.revsingle(repo, rev2, 'null').node()

    # only the dirstate parents are touched, under the working-copy lock
    with repo.wlock():
        repo.setparents(p1, p2)
|
2642 | 2642 | |
|
@command('debugssl', [], '[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    '''test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary.  It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used.  If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation.  Otherwise, the cause
    of the SSL error is likely another issue.
    '''
    if not pycompat.iswindows:
        raise error.Abort(_('certificate chain building is only possible on '
                            'Windows'))

    if not source:
        if not repo:
            raise error.Abort(_("there is no Mercurial repository here, and no "
                                "server specified"))
        source = "default"

    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)

    # resolve host/port, falling back to the scheme's standard port
    defaultport = {'https': 443, 'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_("malformed port number in URL"))
    else:
        raise error.Abort(_("only https and ssh connections are supported"))

    from . import win32

    # NOTE(review): ssl.wrap_socket is deprecated in modern Python (use
    # SSLContext.wrap_socket). CERT_NONE disables verification here —
    # presumably because we only need the raw peer certificate bytes for
    # the chain check below; confirm before changing.
    s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
                        cert_reqs=ssl.CERT_NONE, ca_certs=None)

    try:
        s.connect(addr)
        # DER-encoded peer certificate (True => binary form)
        cert = s.getpeercert(True)

        ui.status(_('checking the certificate chain for %s\n') % url.host)

        # first pass: check only, without triggering Windows Update
        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_('certificate chain is incomplete, updating... '))

            # second pass with build enabled attempts the repair
            if not win32.checkcertificatechain(cert):
                ui.status(_('failed.\n'))
            else:
                ui.status(_('done.\n'))
        else:
            ui.status(_('full certificate chain is available\n'))
    finally:
        s.close()
|
2703 | 2703 | |
|
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    # dump the subrepository state (source URL and pinned revision) for
    # every subrepo recorded in the given changeset
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.write(('path %s\n') % path)
        ui.write((' source   %s\n') % state[0])
        ui.write((' revision %s\n') % state[1])
|
2714 | 2714 | |
|
@command('debugsuccessorssets',
    [('', 'closest', False, _('return closest successors sets only'))],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    ctx2str = bytes
    node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n'% ctx2str(ctx))
        succsets = obsutil.successorssets(repo, ctx.node(),
                                          closest=opts[r'closest'],
                                          cache=cache)
        for succsset in succsets:
            # one indented line per successors set; empty sets still get
            # a (blank) line so divergence remains countable
            if succsset:
                ui.write('    ')
                ui.write(' '.join(node2str(n) for n in succsset))
            ui.write('\n')
|
2767 | 2767 | |
|
@command('debugtemplate',
    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    # The r'...' keys: **opts arrives with native-string keys, so lookups
    # must use native strings as well.
    revs = None
    if opts[r'rev']:
        # The command is declared optionalrepo=True, so a repository is not
        # guaranteed; --rev only makes sense when one exists.
        if repo is None:
            raise error.RepoError(_('there is no Mercurial repository here '
                                    '(.hg not found)'))
        revs = scmutil.revrange(repo, opts[r'rev'])

    # Collect -D KEY=VALUE pairs into the template property dict.
    props = {}
    for d in opts[r'define']:
        try:
            k, v = (e.strip() for e in d.split('=', 1))
            # Reject an empty key and the reserved name 'ui'. A definition
            # without '=' also ends up here: unpacking a single-element
            # generator raises ValueError.
            if not k or k == 'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_('malformed keyword definition: %s') % d)

    if ui.verbose:
        # Print the raw parse tree, and the alias-expanded tree when the
        # expansion actually changed something.
        aliases = ui.configitems('templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), '\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')

    if revs is None:
        # Generic template: render exactly once with the -D properties.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested revision.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
2824 | 2824 | |
|
@command('debuguigetpass', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguigetpass(ui, prompt=''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    # Fixed typo: this previously printed 'respose', which was both
    # misspelled and inconsistent with the 'response' wording of the
    # sibling debuguiprompt command (and untestable output in tests).
    ui.write(('response: %s\n') % r)
|
2832 | 2832 | |
|
@command('debuguiprompt', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguiprompt(ui, prompt=''):
    """show plain prompt"""
    # Echo whatever the user typed so tests can observe prompt handling.
    answer = ui.prompt(prompt)
    ui.write(('response: %s\n') % answer)
|
2840 | 2840 | |
|
@command('debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Hold both locks while regenerating caches; wlock is acquired first,
    # then the store lock, preserving the original acquisition order.
    with repo.wlock():
        with repo.lock():
            repo.updatecaches(full=True)
|
2846 | 2846 | |
|
@command('debugupgraderepo', [
    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
    ('', 'run', False, _('performs an upgrade')),
    ('', 'backup', True, _('keep the old repository content around')),
    ('', 'changelog', None, _('select the changelog for upgrade')),
    ('', 'manifest', None, _('select the manifest for upgrade')),
])
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlog will be upgraded. You can restrict this using flag
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlog but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    """
    # Thin shim: gather the explicit keyword arguments together with the
    # revlog-selection flags and hand everything to the upgrade module.
    upgradeargs = dict(run=run, optimize=optimize, backup=backup)
    upgradeargs.update(opts)
    return upgrade.upgraderepo(ui, repo, **upgradeargs)
|
2880 | 2883 | |
|
@command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
         inferrepo=True)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    # Translate path separators only when ui.slash is requested on a
    # platform whose native separator is not '/'.
    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
        f = util.normpath
    else:
        f = lambda fn: fn
    # Column widths sized to the longest repo-relative and cwd-relative
    # paths. (Previously this built throwaway lists inside max() and
    # used a local named 'abs', shadowing the builtin abs().)
    fmt = 'f %%-%ds %%-%ds %%s' % (
        max(len(fn) for fn in items),
        max(len(repo.pathto(fn)) for fn in items))
    for fn in items:
        line = fmt % (fn, f(repo.pathto(fn)), m.exact(fn) and 'exact' or '')
        ui.write("%s\n" % line.rstrip())
|
2901 | 2904 | |
|
@command('debugwhyunstable', [], _('REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    target = scmutil.revsingle(repo, rev)
    for item in obsutil.whyunstable(repo, target):
        divergent = item.get('divergentnodes')
        if divergent:
            # One "<hex> (<phase>)" entry per divergent node, followed by
            # a trailing separator space.
            parts = ['%s (%s)' % (c.hex(), c.phasestr()) for c in divergent]
            dnodes = ' '.join(parts) + ' '
        else:
            dnodes = ''
        ui.write('%s: %s%s %s\n' % (item['instability'], dnodes,
                                    item['reason'], item['node']))
|
2912 | 2915 | |
|
@command('debugwireargs',
    [('', 'three', '', 'three'),
     ('', 'four', '', 'four'),
     ('', 'five', '', 'five'),
    ] + cmdutil.remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True)
def debugwireargs(ui, repopath, *vals, **opts):
    # Echo arguments off a remote peer to exercise wire-protocol argument
    # handling end to end.
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    # The generic remote options were consumed by hg.peer(); drop them so
    # only command-specific options remain.
    for remoteopt in cmdutil.remoteopts:
        del opts[remoteopt[1]]
    # Forward only the options that were actually set.
    args = dict((k, v) for k, v in opts.iteritems() if v)
    args = pycompat.strkwargs(args)
    # run twice to check that we don't mess up the stream for the next command
    res1 = repo.debugwireargs(*vals, **args)
    res2 = repo.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)
|
2936 | 2939 | |
|
2937 | 2940 | def _parsewirelangblocks(fh): |
|
2938 | 2941 | activeaction = None |
|
2939 | 2942 | blocklines = [] |
|
2940 | 2943 | lastindent = 0 |
|
2941 | 2944 | |
|
2942 | 2945 | for line in fh: |
|
2943 | 2946 | line = line.rstrip() |
|
2944 | 2947 | if not line: |
|
2945 | 2948 | continue |
|
2946 | 2949 | |
|
2947 | 2950 | if line.startswith(b'#'): |
|
2948 | 2951 | continue |
|
2949 | 2952 | |
|
2950 | 2953 | if not line.startswith(b' '): |
|
2951 | 2954 | # New block. Flush previous one. |
|
2952 | 2955 | if activeaction: |
|
2953 | 2956 | yield activeaction, blocklines |
|
2954 | 2957 | |
|
2955 | 2958 | activeaction = line |
|
2956 | 2959 | blocklines = [] |
|
2957 | 2960 | lastindent = 0 |
|
2958 | 2961 | continue |
|
2959 | 2962 | |
|
2960 | 2963 | # Else we start with an indent. |
|
2961 | 2964 | |
|
2962 | 2965 | if not activeaction: |
|
2963 | 2966 | raise error.Abort(_('indented line outside of block')) |
|
2964 | 2967 | |
|
2965 | 2968 | indent = len(line) - len(line.lstrip()) |
|
2966 | 2969 | |
|
2967 | 2970 | # If this line is indented more than the last line, concatenate it. |
|
2968 | 2971 | if indent > lastindent and blocklines: |
|
2969 | 2972 | blocklines[-1] += line.lstrip() |
|
2970 | 2973 | else: |
|
2971 | 2974 | blocklines.append(line) |
|
2972 | 2975 | lastindent = indent |
|
2973 | 2976 | |
|
2974 | 2977 | # Flush last block. |
|
2975 | 2978 | if activeaction: |
|
2976 | 2979 | yield activeaction, blocklines |
|
2977 | 2980 | |
|
@command('debugwireproto',
    [
        ('', 'localssh', False, _('start an SSH server for this repo')),
        ('', 'peer', '', _('construct a specific version of the peer')),
        ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
        ('', 'nologhandshake', False,
         _('do not log I/O related to the peer handshake')),
    ] + cmdutil.remoteopts,
    _('[PATH]'),
    optionalrepo=True)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
    raw data payloads and don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is specified via stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.

    raw+
    ----

    Behaves like ``raw`` except flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

        command listkeys
            namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` N bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` N bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit a *Unified Frame-Based Protocol* by using special
    syntax.

    A frame is composed as a type, flags, and payload. These can be parsed
    from a string of the form:

       <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
    """
    opts = pycompat.byteskwargs(opts)

    if opts['localssh'] and not repo:
        raise error.Abort(_('--localssh requires a repository'))

    if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
        # Fix: the hint previously omitted "http2" even though the check
        # above accepts it; keep the hint in sync with the valid values.
        raise error.Abort(_('invalid value for --peer'),
                          hint=_('valid values are "raw", "http2", "ssh1", '
                                 'and "ssh2"'))

    if path and opts['localssh']:
        raise error.Abort(_('cannot specify --localssh with an explicit '
                            'path'))

    if ui.interactive():
        ui.write(_('(waiting for commands on stdin)\n'))

    blocks = list(_parsewirelangblocks(ui.fin))

    proc = None
    stdin = None
    stdout = None
    stderr = None
    opener = None

    if opts['localssh']:
        # We start the SSH server in its own process so there is process
        # separation. This prevents a whole class of potential bugs around
        # shared state from interfering with server operation.
        args = procutil.hgcmd() + [
            '-R', repo.root,
            'debugserve', '--sshstdio',
        ]
        proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                bufsize=0)

        stdin = proc.stdin
        stdout = proc.stdout
        stderr = proc.stderr

        # We turn the pipes into observers so we can log I/O.
        if ui.verbose or opts['peer'] == 'raw':
            stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
                                               logdata=True)
            stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
                                                logdata=True)
            stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
                                                logdata=True)

        # --localssh also implies the peer connection settings.

        url = 'ssh://localserver'
        autoreadstderr = not opts['noreadstderr']

        if opts['peer'] == 'ssh1':
            ui.write(_('creating ssh peer for wire protocol version 1\n'))
            peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
                                     None, autoreadstderr=autoreadstderr)
        elif opts['peer'] == 'ssh2':
            ui.write(_('creating ssh peer for wire protocol version 2\n'))
            peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
                                     None, autoreadstderr=autoreadstderr)
        elif opts['peer'] == 'raw':
            ui.write(_('using raw connection to peer\n'))
            peer = None
        else:
            ui.write(_('creating ssh peer from handshake results\n'))
            peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
                                    autoreadstderr=autoreadstderr)

    elif path:
        # We bypass hg.peer() so we can proxy the sockets.
        # TODO consider not doing this because we skip
        # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
        u = util.url(path)
        if u.scheme != 'http':
            raise error.Abort(_('only http:// paths are currently supported'))

        url, authinfo = u.authinfo()
        openerargs = {
            r'useragent': b'Mercurial debugwireproto',
        }

        # Turn pipes/sockets into observers so we can log I/O.
        if ui.verbose:
            openerargs.update({
                r'loggingfh': ui,
                r'loggingname': b's',
                r'loggingopts': {
                    r'logdata': True,
                    r'logdataapis': False,
                },
            })

        if ui.debugflag:
            openerargs[r'loggingopts'][r'logdataapis'] = True

        # Don't send default headers when in raw mode. This allows us to
        # bypass most of the behavior of our URL handling code so we can
        # have near complete control over what's sent on the wire.
        if opts['peer'] == 'raw':
            openerargs[r'sendaccept'] = False

        opener = urlmod.opener(ui, authinfo, **openerargs)

        if opts['peer'] == 'http2':
            ui.write(_('creating http peer for wire protocol version 2\n'))
            # We go through makepeer() because we need an API descriptor for
            # the peer instance to be useful.
            with ui.configoverride({
                ('experimental', 'httppeer.advertise-v2'): True}):
                if opts['nologhandshake']:
                    ui.pushbuffer()

                peer = httppeer.makepeer(ui, path, opener=opener)

                if opts['nologhandshake']:
                    ui.popbuffer()

            if not isinstance(peer, httppeer.httpv2peer):
                raise error.Abort(_('could not instantiate HTTP peer for '
                                    'wire protocol version 2'),
                                  hint=_('the server may not have the feature '
                                         'enabled or is not allowing this '
                                         'client version'))

        elif opts['peer'] == 'raw':
            ui.write(_('using raw connection to peer\n'))
            peer = None
        elif opts['peer']:
            raise error.Abort(_('--peer %s not supported with HTTP peers') %
                              opts['peer'])
        else:
            peer = httppeer.makepeer(ui, path, opener=opener)

        # We /could/ populate stdin/stdout with sock.makefile()...
    else:
        raise error.Abort(_('unsupported connection configuration'))

    batchedcommands = None

    # Now perform actions based on the parsed wire language instructions.
    for action, lines in blocks:
        if action in ('raw', 'raw+'):
            if not stdin:
                raise error.Abort(_('cannot call raw/raw+ on this peer'))

            # Concatenate the data together.
            data = ''.join(l.lstrip() for l in lines)
            data = stringutil.unescapestr(data)
            stdin.write(data)

            if action == 'raw+':
                stdin.flush()
        elif action == 'flush':
            if not stdin:
                raise error.Abort(_('cannot call flush on this peer'))
            stdin.flush()
        elif action.startswith('command'):
            if not peer:
                raise error.Abort(_('cannot send commands unless peer instance '
                                    'is available'))

            command = action.split(' ', 1)[1]

            args = {}
            for line in lines:
                # We need to allow empty values.
                fields = line.lstrip().split(' ', 1)
                if len(fields) == 1:
                    key = fields[0]
                    value = ''
                else:
                    key, value = fields

                if value.startswith('eval:'):
                    value = stringutil.evalpythonliteral(value[5:])
                else:
                    value = stringutil.unescapestr(value)

                args[key] = value

            if batchedcommands is not None:
                batchedcommands.append((command, args))
                continue

            ui.status(_('sending %s command\n') % command)

            if 'PUSHFILE' in args:
                with open(args['PUSHFILE'], r'rb') as fh:
                    del args['PUSHFILE']
                    res, output = peer._callpush(command, fh,
                                                 **pycompat.strkwargs(args))
                    ui.status(_('result: %s\n') % stringutil.escapestr(res))
                    ui.status(_('remote output: %s\n') %
                              stringutil.escapestr(output))
            else:
                with peer.commandexecutor() as e:
                    res = e.callcommand(command, args).result()

                if isinstance(res, wireprotov2peer.commandresponse):
                    val = res.objects()
                    ui.status(_('response: %s\n') %
                              stringutil.pprint(val, bprefix=True, indent=2))
                else:
                    ui.status(_('response: %s\n') %
                              stringutil.pprint(res, bprefix=True, indent=2))

        elif action == 'batchbegin':
            if batchedcommands is not None:
                raise error.Abort(_('nested batchbegin not allowed'))

            batchedcommands = []
        elif action == 'batchsubmit':
            # There is a batching API we could go through. But it would be
            # difficult to normalize requests into function calls. It is easier
            # to bypass this layer and normalize to commands + args.
            ui.status(_('sending batch with %d sub-commands\n') %
                      len(batchedcommands))
            for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                ui.status(_('response #%d: %s\n') %
                          (i, stringutil.escapestr(chunk)))

            batchedcommands = None

        elif action.startswith('httprequest '):
            if not opener:
                raise error.Abort(_('cannot use httprequest without an HTTP '
                                    'peer'))

            request = action.split(' ', 2)
            if len(request) != 3:
                raise error.Abort(_('invalid httprequest: expected format is '
                                    '"httprequest <method> <path>'))

            method, httppath = request[1:]
            headers = {}
            body = None
            frames = []
            for line in lines:
                line = line.lstrip()
                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                if m:
                    # Headers need to use native strings.
                    key = pycompat.strurl(m.group(1))
                    value = pycompat.strurl(m.group(2))
                    headers[key] = value
                    continue

                if line.startswith(b'BODYFILE '):
                    # Fix: split() returns a list; open() needs the path
                    # element after the "BODYFILE " prefix, so index [1].
                    # (Previously the whole list was passed to open(),
                    # raising TypeError whenever BODYFILE was used.)
                    with open(line.split(b' ', 1)[1], 'rb') as fh:
                        body = fh.read()
                elif line.startswith(b'frame '):
                    frame = wireprotoframing.makeframefromhumanstring(
                        line[len(b'frame '):])

                    frames.append(frame)
                else:
                    raise error.Abort(_('unknown argument to httprequest: %s') %
                                      line)

            url = path + httppath

            if frames:
                body = b''.join(bytes(f) for f in frames)

            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)

            # urllib.Request insists on using has_data() as a proxy for
            # determining the request method. Override that to use our
            # explicitly requested method.
            req.get_method = lambda: pycompat.sysstr(method)

            try:
                res = opener.open(req)
                body = res.read()
            except util.urlerr.urlerror as e:
                # read() method must be called, but only exists in Python 2
                getattr(e, 'read', lambda: None)()
                continue

            ct = res.headers.get(r'Content-Type')
            if ct == r'application/mercurial-cbor':
                ui.write(_('cbor> %s\n') %
                         stringutil.pprint(cborutil.decodeall(body),
                                           bprefix=True,
                                           indent=2))

        elif action == 'close':
            peer.close()
        elif action == 'readavailable':
            if not stdout or not stderr:
                raise error.Abort(_('readavailable not available on this peer'))

            stdin.close()
            stdout.read()
            stderr.read()

        elif action == 'readline':
            if not stdout:
                raise error.Abort(_('readline not available on this peer'))
            stdout.readline()
        elif action == 'ereadline':
            if not stderr:
                raise error.Abort(_('ereadline not available on this peer'))
            stderr.readline()
        elif action.startswith('read '):
            count = int(action.split(' ', 1)[1])
            if not stdout:
                raise error.Abort(_('read not available on this peer'))
            stdout.read(count)
        elif action.startswith('eread '):
            count = int(action.split(' ', 1)[1])
            if not stderr:
                raise error.Abort(_('eread not available on this peer'))
            stderr.read(count)
        else:
            raise error.Abort(_('unknown action: %s') % action)

    if batchedcommands is not None:
        raise error.Abort(_('unclosed "batchbegin" request'))

    if peer:
        peer.close()

    if proc:
        proc.kill()
@@ -1,1056 +1,1060 | |||
|
1 | 1 | # upgrade.py - functions for in place upgrade of Mercurial repository |
|
2 | 2 | # |
|
3 | 3 | # Copyright (c) 2016-present, Gregory Szorc |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import stat |
|
11 | 11 | |
|
12 | 12 | from .i18n import _ |
|
13 | 13 | from . import ( |
|
14 | 14 | changelog, |
|
15 | 15 | error, |
|
16 | 16 | filelog, |
|
17 | 17 | hg, |
|
18 | 18 | localrepo, |
|
19 | 19 | manifest, |
|
20 | 20 | pycompat, |
|
21 | 21 | revlog, |
|
22 | 22 | scmutil, |
|
23 | 23 | util, |
|
24 | 24 | vfs as vfsmod, |
|
25 | 25 | ) |
|
26 | 26 | |
|
27 | 27 | from .utils import ( |
|
28 | 28 | compression, |
|
29 | 29 | ) |
|
30 | 30 | |
|
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # Both of these requirements date back to Mercurial 0.9.2; repositories
    # predating them cannot be upgraded in place.
    return {'revlogv1', 'store'}
|
43 | 43 | |
|
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blockers = set()
    # The upgrade code does not yet support these experimental features.
    # This is an artificial limitation.
    blockers.add('treemanifest')
    # This was a precursor to generaldelta and was never enabled by default.
    # It should (hopefully) not exist in the wild.
    blockers.add('parentdelta')
    # Upgrade should operate on the actual store, not the shared link.
    blockers.add('shared')
    return blockers
|
60 | 60 | |
|
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    supported = {
        localrepo.SPARSEREVLOG_REQUIREMENT,
    }
    # Also allow dropping the requirement of every compression engine that
    # is usable for revlogs on this installation.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == 'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
|
78 | 78 | |
|
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
        localrepo.SPARSEREVLOG_REQUIREMENT,
    }
    # Any compression engine usable for revlogs on this installation is a
    # valid destination requirement as well.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == 'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
|
102 | 102 | |
|
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    allowed = {
        'dotencode',
        'fncache',
        'generaldelta',
        localrepo.SPARSEREVLOG_REQUIREMENT,
    }
    # Requirements for locally usable revlog compression engines may also
    # be introduced by an upgrade.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        allowed.add(b'exp-compression-%s' % name)
        if engine.name() == 'zstd':
            allowed.add(b'revlog-compression-zstd')
    return allowed
|
126 | 126 | |
|
def preservedrequirements(repo):
    """Requirements of the source repository that are always carried over.

    Currently nothing is unconditionally preserved.
    """
    return set()
|
129 | 129 | |
|
# Valid values for ``improvement.type``: a "deficiency" is a problem with
# the current state of the repository; an "optimization" is an optional
# extra action (note the British/American spelling mismatch is intentional
# and load-bearing for output compatibility).
deficiency = 'deficiency'
optimisation = 'optimization'
|
132 | 132 | |
|
class improvement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.
    """
    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

    def __hash__(self):
        # Identity is fully determined by ``name``; hash accordingly so
        # improvements can live in sets and dict keys.
        return hash(self.name)

    def __eq__(self, other):
        if not isinstance(other, improvement):
            # Let Python fall back to the reflected comparison.
            return NotImplemented
        return self.name == other.name

    def __ne__(self, other):
        return not (self == other)
|
174 | 174 | |
|
# Registry of every formatvariant subclass, in registration order.
allformatvariant = []

def registerformatvariant(cls):
    """Class decorator recording ``cls`` in ``allformatvariant``."""
    allformatvariant.append(cls)
    return cls
|
180 | 180 | |
|
class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""
    # Format variants are always reported as deficiencies when they differ
    # from the current default.
    type = deficiency
    ### The following attributes should be defined for each class:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it ``deficiency`` types, should be worded
    # in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    def __init__(self):
        # formatvariant subclasses are used at class level only and are
        # never instantiated.
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()
|
214 | 214 | |
|
class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variant are controlled by a 'requirement'. We define a small
    subclass to factor the code.
    """

    # the requirement that control this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        # Requirements a brand-new repository would be created with; used as
        # the reference point for the "config" value of the variant.
        return localrepo.newreporequirements(
            ui, localrepo.defaultcreateopts(ui))

    @classmethod
    def fromrepo(cls, repo):
        """The variant is active iff its requirement is set on the repo."""
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        """The variant is configured iff a fresh repo would get the requirement."""
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo.ui)
|
239 | 239 | |
|
@registerformatvariant
class fncache(requirementformatvariant):
    # Format variant backed by the 'fncache' store requirement.
    name = 'fncache'

    _requirement = 'fncache'

    default = True

    description = _('long and reserved filenames may not work correctly; '
                    'repository performance is sub-optimal')

    upgrademessage = _('repository will be more resilient to storing '
                       'certain paths and performance of certain '
                       'operations should be improved')
|
254 | 254 | |
|
@registerformatvariant
class dotencode(requirementformatvariant):
    # Format variant backed by the 'dotencode' store requirement.
    name = 'dotencode'

    _requirement = 'dotencode'

    default = True

    description = _('storage of filenames beginning with a period or '
                    'space may not work correctly')

    upgrademessage = _('repository will be better able to store files '
                       'beginning with a space or period')
|
268 | 268 | |
|
@registerformatvariant
class generaldelta(requirementformatvariant):
    # Format variant backed by the 'generaldelta' requirement.
    name = 'generaldelta'

    _requirement = 'generaldelta'

    default = True

    description = _('deltas within internal storage are unable to '
                    'choose optimal revisions; repository is larger and '
                    'slower than it could be; interaction with other '
                    'repositories may require extra network and CPU '
                    'resources, making "hg push" and "hg pull" slower')

    upgrademessage = _('repository storage will be able to create '
                       'optimal deltas; new repository data will be '
                       'smaller and read times should decrease; '
                       'interacting with other repositories using this '
                       'storage model should require less network and '
                       'CPU resources, making "hg push" and "hg pull" '
                       'faster')
|
290 | 290 | |
|
@registerformatvariant
class sparserevlog(requirementformatvariant):
    # Format variant backed by the sparse-revlog requirement constant
    # defined on localrepo.
    name = 'sparserevlog'

    _requirement = localrepo.SPARSEREVLOG_REQUIREMENT

    default = True

    description = _('in order to limit disk reading and memory usage on older '
                    'version, the span of a delta chain from its root to its '
                    'end is limited, whatever the relevant data in this span. '
                    'This can severly limit Mercurial ability to build good '
                    'chain of delta resulting is much more storage space being '
                    'taken and limit reusability of on disk delta during '
                    'exchange.'
                   )

    upgrademessage = _('Revlog supports delta chain with more unused data '
                       'between payload. These gaps will be skipped at read '
                       'time. This allows for better delta chains, making a '
                       'better compression and faster exchange with server.')
|
312 | 312 | |
|
@registerformatvariant
class removecldeltachain(formatvariant):
    # Not requirement-backed: the state is detected by inspecting the
    # changelog entries themselves.
    name = 'plain-cl-delta'

    default = True

    description = _('changelog storage is using deltas instead of '
                    'raw entries; changelog reading and any '
                    'operation relying on changelog data are slower '
                    'than they could be')

    upgrademessage = _('changelog storage will be reformated to '
                       'store raw entries; changelog reading will be '
                       'faster; changelog size may be reduced')

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 changed changelogs to not use delta chains. Search for
        # changelogs with deltas.
        cl = repo.changelog
        chainbase = cl.chainbase
        # A revision that is its own chain base is stored as a full snapshot,
        # so the variant holds only when every revision is its own base.
        return all(rev == chainbase(rev) for rev in cl)

    @staticmethod
    def fromconfig(repo):
        # New repositories always store plain changelog entries.
        return True
|
339 | 339 | |
|
@registerformatvariant
class compressionengine(formatvariant):
    name = 'compression'
    default = 'zlib'

    description = _('Compresion algorithm used to compress data. '
                    'Some engine are faster than other')

    upgrademessage = _('revlog content will be recompressed with the new '
                       'algorithm.')

    @classmethod
    def fromrepo(cls, repo):
        # we allow multiple compression engine requirements to co-exist because
        # strictly speaking, revlog seems to support mixed compression style.
        #
        # The compression used for new entries will be "the last one"
        compression = 'zlib'
        for req in repo.requirements:
            prefix = req.startswith
            if prefix('revlog-compression-') or prefix('exp-compression-'):
                # maxsplit=2 keeps engine names that contain '-' intact.
                compression = req.split('-', 2)[2]
        return compression

    @classmethod
    def fromconfig(cls, repo):
        return repo.ui.config('format', 'revlog-compression')
|
367 | 367 | |
|
@registerformatvariant
class compressionlevel(formatvariant):
    name = 'compression-level'
    default = 'default'

    description = _('compression level')

    upgrademessage = _('revlog content will be recompressed')

    @classmethod
    def _configuredlevel(cls, repo, comp):
        """Return the configured level for engine ``comp`` as bytes.

        Returns ``'default'`` when no explicit level is configured or when
        the engine has no tunable level. Shared by ``fromrepo`` and
        ``fromconfig``, which differ only in how ``comp`` is determined.
        """
        level = None
        if comp == 'zlib':
            level = repo.ui.configint('storage', 'revlog.zlib.level')
        elif comp == 'zstd':
            level = repo.ui.configint('storage', 'revlog.zstd.level')
        if level is None:
            return 'default'
        # Do not use bytes(level): on Python 3 ``bytes(int)`` creates a
        # NUL-filled buffer of that length instead of the decimal string.
        # ``b'%d' % level`` renders the number on both Python 2 and 3.
        return b'%d' % level

    @classmethod
    def fromrepo(cls, repo):
        # Level actually in use, derived from the repo's requirements.
        return cls._configuredlevel(repo, compressionengine.fromrepo(repo))

    @classmethod
    def fromconfig(cls, repo):
        # Level a new repository would get, derived from the configuration.
        return cls._configuredlevel(repo, compressionengine.fromconfig(repo))
|
400 | 400 | |
|
def finddeficiencies(repo):
    """returns a list of deficiencies that the repo suffer from"""
    # Lack of revlogv1/store could be detected here, but those requirements
    # date back to 0.9.2 and repos without them cannot be upgraded anyway,
    # so there is no point in checking.
    return [fv for fv in allformatvariant if not fv.fromrepo(repo)]
|
414 | 414 | |
|
# Search without '-' to support the older form on newer clients.
#
# We don't enforce backward compatibility for debug commands, so this
# might eventually be dropped. However, having to use two different
# forms in scripts when comparing results is annoying enough to keep
# backward compatibility for a while.
legacy_opts_map = {
    'redeltaparent': 're-delta-parent',
    'redeltamultibase': 're-delta-multibase',
    'redeltaall': 're-delta-all',
    'redeltafulladd': 're-delta-fulladd',
}
|
427 | 427 | |
|
def findoptimizations(repo):
    """Determine optimisation that could be used during upgrade

    Returns a list of ``improvement`` instances with type ``optimisation``.
    """
    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.
    optimizations = []

    optimizations.append(improvement(
        name='re-delta-parent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    optimizations.append(improvement(
        name='re-delta-multibase',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revision and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the amount '
                      'of files in the repository; this slow down should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))

    optimizations.append(improvement(
        name='re-delta-all',
        type=optimisation,
        description=_('deltas within internal storage will always be '
                      'recalculated without reusing prior deltas; this will '
                      'likely make execution run several times slower; this '
                      'optimization is typically not needed'),
        upgrademessage=_('deltas within internal storage will be fully '
                         'recomputed; this will likely drastically slow down '
                         'execution time')))

    optimizations.append(improvement(
        name='re-delta-fulladd',
        type=optimisation,
        description=_('every revision will be re-added as if it was new '
                      'content. It will go through the full storage '
                      'mechanism giving extensions a chance to process it '
                      '(eg. lfs). This is similar to "re-delta-all" but even '
                      'slower since more logic is involved.'),
        upgrademessage=_('each revision will be added as new content to the '
                         'internal storage; this will likely drastically slow '
                         'down execution time, but some extensions might need '
                         'it')))

    return optimizations
|
489 | 489 | |
|
def determineactions(repo, deficiencies, sourcereqs, destreqs):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``finddeficiencies`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    knownreqs = supporteddestrequirements(repo)

    # Keep a deficiency unless it is a known requirement that the
    # destination will not carry.
    newactions = [d for d in deficiencies
                  if d.name not in knownreqs or d.name in destreqs]

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
|
521 | 521 | |
|
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith('00manifest.i'):
        tree = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=tree)
    # Everything else is a filelog; undo "/".join(("data", path + ".i"))
    # by stripping the leading "data/" and the trailing ".i".
    return filelog.filelog(repo.svfs, path[5:-2])
|
535 | 535 | |
|
def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    # Unwrap any wrapper object exposing the low-level revlog via `_revlog`.
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    # Resolve full filesystem paths of the index and data files on both sides.
    oldvfs = oldrl.opener
    newvfs = newrl.opener
    oldindex = oldvfs.join(oldrl.indexfile)
    newindex = newvfs.join(newrl.indexfile)
    olddata = oldvfs.join(oldrl.datafile)
    newdata = newvfs.join(newrl.datafile)

    newdir = newvfs.dirname(newrl.indexfile)
    newvfs.makedirs(newdir)

    util.copyfile(oldindex, newindex)
    # The data file may be absent (presumably inline revlogs); copy only
    # when it exists.
    if oldrl.opener.exists(olddata):
        util.copyfile(olddata, newdata)

    # Everything except the changelog and manifest is a filelog and must be
    # registered in the destination fncache.
    if not (unencodedname.endswith('00changelog.i')
            or unencodedname.endswith('00manifest.i')):
        destrepo.svfs.fncache.add(unencodedname)
|
564 | 564 | |
|
# Sentinels identifying the classes of revlogs an upgrade may target.
UPGRADE_CHANGELOG = object()
UPGRADE_MANIFEST = object()
UPGRADE_FILELOG = object()

# Default filter: process every class of revlog.
UPGRADE_ALL_REVLOGS = frozenset([UPGRADE_CHANGELOG,
                                 UPGRADE_MANIFEST,
                                 UPGRADE_FILELOG])
|
572 | 572 | |
|
def matchrevlog(revlogfilter, entry):
    """Tell whether the store ``entry`` is selected by ``revlogfilter``.

    ``revlogfilter`` is a collection of the UPGRADE_* sentinels.
    """
    if entry.endswith('00changelog.i'):
        wanted = UPGRADE_CHANGELOG
    elif entry.endswith('00manifest.i'):
        wanted = UPGRADE_MANIFEST
    else:
        wanted = UPGRADE_FILELOG
    return wanted in revlogfilter
|
582 | 582 | |
|
def _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents,
                  revlogs=UPGRADE_ALL_REVLOGS):
    """Copy revlogs between 2 repos.

    A first pass over the store collects per-class (filelog, manifest,
    changelog) statistics so a unified progress report can be shown. A
    second pass either clones each revlog (re-encoding revisions, honoring
    ``deltareuse`` and ``forcedeltabothparents``) when selected by
    ``revlogs``, or copies its files verbatim otherwise.
    """
    # Aggregate counters plus per-class breakdowns (f=filelog, m=manifest,
    # c=changelog) used for the progress messages below.
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith('.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(exclusivefiles=True, revisionscount=True,
                              trackedsize=True, storedsize=True)

        revcount += info['revisionscount'] or 0
        datasize = info['storedsize'] or 0
        rawsize = info['trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            # Previously this exception was constructed but never raised,
            # silently ignoring unknown revlog types. Raise it.
            raise error.ProgrammingError('unknown revlog type')

    if not revcount:
        return

    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
               '%d in changelog)\n') %
             (revcount, frevcount, mrevcount, crevcount))
    ui.write(_('migrating %s in store; %s tracked data\n') % (
             (util.bytecount(srcsize), util.bytecount(srcrawsize))))

    # Used to keep track of progress.
    progress = None
    def oncopiedrevision(rl, rev, node):
        progress.increment()

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith('.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)

        # The first changelog (resp. manifest) entry marks the end of the
        # manifest (resp. filelog) phase: print its summary and start a new
        # progress bar. (Assumes store.walk yields filelogs, then manifests,
        # then the changelog — the message ordering below relies on it.)
        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
            ui.write(_('finished migrating %d manifest revisions across %d '
                       'manifests; change in size: %s\n') %
                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))

            ui.write(_('migrating changelog containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (crevcount, util.bytecount(csrcsize),
                      util.bytecount(crawsize)))
            seen.add('c')
            progress = srcrepo.ui.makeprogress(_('changelog revisions'),
                                               total=crevcount)
        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
            ui.write(_('finished migrating %d filelog revisions across %d '
                       'filelogs; change in size: %s\n') %
                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))

            ui.write(_('migrating %d manifests containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (mcount, mrevcount, util.bytecount(msrcsize),
                      util.bytecount(mrawsize)))
            seen.add('m')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(_('manifest revisions'),
                                               total=mrevcount)
        elif 'f' not in seen:
            ui.write(_('migrating %d filelogs containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (fcount, frevcount, util.bytecount(fsrcsize),
                      util.bytecount(frawsize)))
            seen.add('f')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(_('file revisions'),
                                               total=frevcount)

        if matchrevlog(revlogs, unencoded):
            # Selected for cloning: re-encode revision by revision.
            ui.note(_('cloning %d revisions from %s\n')
                    % (len(oldrl), unencoded))
            newrl = _revlogfrompath(dstrepo, unencoded)
            oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
                        deltareuse=deltareuse,
                        forcedeltabothparents=forcedeltabothparents)
        else:
            # Not selected: copy the underlying files untouched.
            msg = _('blindly copying %s containing %i revisions\n')
            ui.note(msg % (unencoded, len(oldrl)))
            _copyrevlog(tr, dstrepo, oldrl, unencoded)

            newrl = _revlogfrompath(dstrepo, unencoded)

        info = newrl.storageinfo(storedsize=True)
        datasize = info['storedsize'] or 0

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    progress.complete()

    ui.write(_('finished migrating %d changelog revisions; change in size: '
               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))

    ui.write(_('finished migrating %d total revisions; total change in store '
               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
|
736 | 736 | |
|
737 | 737 | def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st): |
|
738 | 738 | """Determine whether to copy a store file during upgrade. |
|
739 | 739 | |
|
740 | 740 | This function is called when migrating store files from ``srcrepo`` to |
|
741 | 741 | ``dstrepo`` as part of upgrading a repository. |
|
742 | 742 | |
|
743 | 743 | Args: |
|
744 | 744 | srcrepo: repo we are copying from |
|
745 | 745 | dstrepo: repo we are copying to |
|
746 | 746 | requirements: set of requirements for ``dstrepo`` |
|
747 | 747 | path: store file being examined |
|
748 | 748 | mode: the ``ST_MODE`` file type of ``path`` |
|
749 | 749 | st: ``stat`` data structure for ``path`` |
|
750 | 750 | |
|
751 | 751 | Function should return ``True`` if the file is to be copied. |
|
752 | 752 | """ |
|
753 | 753 | # Skip revlogs. |
|
754 | 754 | if path.endswith(('.i', '.d')): |
|
755 | 755 | return False |
|
756 | 756 | # Skip transaction related files. |
|
757 | 757 | if path.startswith('undo'): |
|
758 | 758 | return False |
|
759 | 759 | # Only copy regular files. |
|
760 | 760 | if mode != stat.S_IFREG: |
|
761 | 761 | return False |
|
762 | 762 | # Skip other skipped files. |
|
763 | 763 | if path in ('lock', 'fncache'): |
|
764 | 764 | return False |
|
765 | 765 | |
|
766 | 766 | return True |
|
767 | 767 | |
|
768 | 768 | def _finishdatamigration(ui, srcrepo, dstrepo, requirements): |
|
769 | 769 | """Hook point for extensions to perform additional actions during upgrade. |
|
770 | 770 | |
|
771 | 771 | This function is called after revlogs and store files have been copied but |
|
772 | 772 | before the new store is swapped into the original location. |
|
773 | 773 | """ |
|
774 | 774 | |
|
def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions,
                 revlogs=UPGRADE_ALL_REVLOGS):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    Args:
        ui: ui instance used for progress/status output
        srcrepo: repository being upgraded (must hold its wlock)
        dstrepo: temporary staging repository (must hold its wlock)
        requirements: final set of requirements written to ``srcrepo``
        actions: names of upgrade actions/optimizations to apply
        revlogs: which revlog classes to re-clone (all by default)

    Returns the path of the backup directory holding the old store.
    """
    # Both repos must already be write-locked by the caller; this function
    # does not acquire locks itself.
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.write(_('(it is safe to interrupt this process any time before '
               'data migration completes)\n'))

    # Map the requested optimization action to a revlog delta-reuse policy.
    # Note: 're-delta-parent' and 're-delta-multibase' both use
    # DELTAREUSESAMEREVS here; multibase differs only via the
    # forcedeltabothparents flag passed to _clonerevlogs below.
    if 're-delta-all' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif 're-delta-parent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 're-delta-multibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 're-delta-fulladd' in actions:
        deltareuse = revlog.revlog.DELTAREUSEFULLADD
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

    # All writes to the staging repo happen inside a single transaction so
    # an interrupted migration leaves the source repo untouched.
    with dstrepo.transaction('upgrade') as tr:
        _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
                      're-delta-multibase' in actions, revlogs=revlogs)

        # Now copy other files in the store directory.
        # The sorted() makes execution deterministic.
        for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
            if not _filterstorefile(srcrepo, dstrepo, requirements,
                                    p, kind, st):
                continue

            srcrepo.ui.write(_('copying %s\n') % p)
            src = srcrepo.store.rawvfs.join(p)
            dst = dstrepo.store.rawvfs.join(p)
            util.copyfile(src, dst, copystat=True)

        _finishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.write(_('data fully migrated to temporary repository\n'))

    backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | {'upgradeinprogress'})

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath
|
866 | 866 | |
|
def upgraderepo(ui, repo, run=False, optimize=None, backup=True,
                manifest=None, changelog=None):
    """Upgrade a repository in place.

    With ``run=False`` (the default) only report what an upgrade would do;
    with ``run=True`` actually perform it.  ``backup`` controls whether the
    old repository content is kept after a successful upgrade.

    ``changelog`` and ``manifest`` are tri-state revlog selectors:
    ``None`` means "not specified", ``True`` restricts recloning to the
    enabled revlog kinds, and ``False`` excludes that kind from the
    default full set.
    """
    if optimize is None:
        optimize = []
    # Translate legacy optimization names to their current spellings.
    optimize = set(legacy_opts_map.get(o, o) for o in optimize)
    repo = repo.unfiltered()

    # Start from "reclone everything" and narrow per the selectors below.
    revlogs = set(UPGRADE_ALL_REVLOGS)
    specentries = (('c', changelog), ('m', manifest))
    specified = [(y, x) for (y, x) in specentries if x is not None]
    if specified:
        # we have some limitation on revlogs to be recloned
        if any(x for y, x in specified):
            # At least one selector is True: reclone only the enabled kinds.
            revlogs = set()
            for r, enabled in specified:
                if enabled:
                    if r == 'c':
                        revlogs.add(UPGRADE_CHANGELOG)
                    elif r == 'm':
                        revlogs.add(UPGRADE_MANIFEST)
        else:
            # none are enabled
            for r, __ in specified:
                if r == 'c':
                    revlogs.discard(UPGRADE_CHANGELOG)
                elif r == 'm':
                    revlogs.discard(UPGRADE_MANIFEST)

    # Ensure the repository can be upgraded.
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(
        repo.ui, localrepo.defaultcreateopts(repo.ui))
    newreqs.update(preservedrequirements(repo))

    # Refuse the upgrade if it would drop a requirement we do not know how
    # to remove.
    noremovereqs = (repo.requirements - newreqs -
                    supportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    # ... or if it would add a requirement we do not know how to add.
    noaddreqs = (newreqs - repo.requirements -
                 allowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

    # Find and validate all improvements that can be made.
    alloptimizations = findoptimizations(repo)

    # Apply and Validate arguments.
    optimizations = []
    for o in alloptimizations:
        if o.name in optimize:
            optimizations.append(o)
            optimize.discard(o.name)

    if optimize: # anything left is unknown
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(optimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(o for o in sorted(optimizations)
                   # determineactions could have added optimisation
                   if o not in actions)

    def printrequirements():
        # Report how the requirements set changes: kept / removed / added.
        ui.write(_('requirements\n'))
        ui.write(_('   preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_('   removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_('   added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        # One paragraph per action: its name and user-facing message.
        for a in actions:
            ui.write('%s\n   %s\n\n' % (a.name, a.upgrademessage))

    if not run:
        # Dry-run mode: describe deficiencies and what --run would change,
        # then return without touching the repository.
        fromconfig = []
        onlydefault = []

        for d in deficiencies:
            if d.fromconfig(repo):
                fromconfig.append(d)
            elif d.default:
                onlydefault.append(d)

        if fromconfig or onlydefault:

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in alloptimizations if i not in actions]

        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    upgradeactions = [a.name for a in actions]

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock(), repo.lock():
        ui.write(_('repository locked and read-only\n'))
        # Our strategy for upgrading the repository is to create a new,
        # temporary repository, write data to it, then do a swap of the
        # data. There are less heavyweight ways to do this, but it is easier
        # to create a new repo object than to instantiate all the components
        # (like the store) separately.
        tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
        backuppath = None
        try:
            ui.write(_('creating temporary repository to stage migrated '
                       'data: %s\n') % tmppath)

            # clone ui without using ui.copy because repo.ui is protected
            repoui = repo.ui.__class__(repo.ui)
            dstrepo = hg.repository(repoui, path=tmppath, create=True)

            with dstrepo.wlock(), dstrepo.lock():
                backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                          upgradeactions, revlogs=revlogs)
            if not (backup or backuppath is None):
                # --no-backup: discard the old content right away.
                ui.write(_('removing old repository content%s\n') % backuppath)
                repo.vfs.rmtree(backuppath, forcibly=True)
                backuppath = None

        finally:
            # The staging repo is always removed, whether or not the
            # upgrade completed.
            ui.write(_('removing temporary repository %s\n') % tmppath)
            repo.vfs.rmtree(tmppath, forcibly=True)

            if backuppath:
                ui.warn(_('copy of old repository backed up at %s\n') %
                        backuppath)
                ui.warn(_('the old repository will not be deleted; remove '
                          'it to free up disk space once the upgraded '
                          'repository is verified\n'))
@@ -1,423 +1,423 | |||
|
1 | 1 | Show all commands except debug commands |
|
2 | 2 | $ hg debugcomplete |
|
3 | 3 | abort |
|
4 | 4 | add |
|
5 | 5 | addremove |
|
6 | 6 | annotate |
|
7 | 7 | archive |
|
8 | 8 | backout |
|
9 | 9 | bisect |
|
10 | 10 | bookmarks |
|
11 | 11 | branch |
|
12 | 12 | branches |
|
13 | 13 | bundle |
|
14 | 14 | cat |
|
15 | 15 | clone |
|
16 | 16 | commit |
|
17 | 17 | config |
|
18 | 18 | continue |
|
19 | 19 | copy |
|
20 | 20 | diff |
|
21 | 21 | export |
|
22 | 22 | files |
|
23 | 23 | forget |
|
24 | 24 | graft |
|
25 | 25 | grep |
|
26 | 26 | heads |
|
27 | 27 | help |
|
28 | 28 | identify |
|
29 | 29 | import |
|
30 | 30 | incoming |
|
31 | 31 | init |
|
32 | 32 | locate |
|
33 | 33 | log |
|
34 | 34 | manifest |
|
35 | 35 | merge |
|
36 | 36 | outgoing |
|
37 | 37 | parents |
|
38 | 38 | paths |
|
39 | 39 | phase |
|
40 | 40 | pull |
|
41 | 41 | push |
|
42 | 42 | recover |
|
43 | 43 | remove |
|
44 | 44 | rename |
|
45 | 45 | resolve |
|
46 | 46 | revert |
|
47 | 47 | rollback |
|
48 | 48 | root |
|
49 | 49 | serve |
|
50 | 50 | shelve |
|
51 | 51 | status |
|
52 | 52 | summary |
|
53 | 53 | tag |
|
54 | 54 | tags |
|
55 | 55 | tip |
|
56 | 56 | unbundle |
|
57 | 57 | unshelve |
|
58 | 58 | update |
|
59 | 59 | verify |
|
60 | 60 | version |
|
61 | 61 | |
|
62 | 62 | Show all commands that start with "a" |
|
63 | 63 | $ hg debugcomplete a |
|
64 | 64 | abort |
|
65 | 65 | add |
|
66 | 66 | addremove |
|
67 | 67 | annotate |
|
68 | 68 | archive |
|
69 | 69 | |
|
70 | 70 | Do not show debug commands if there are other candidates |
|
71 | 71 | $ hg debugcomplete d |
|
72 | 72 | diff |
|
73 | 73 | |
|
74 | 74 | Show debug commands if there are no other candidates |
|
75 | 75 | $ hg debugcomplete debug |
|
76 | 76 | debugancestor |
|
77 | 77 | debugapplystreamclonebundle |
|
78 | 78 | debugbuilddag |
|
79 | 79 | debugbundle |
|
80 | 80 | debugcapabilities |
|
81 | 81 | debugcheckstate |
|
82 | 82 | debugcolor |
|
83 | 83 | debugcommands |
|
84 | 84 | debugcomplete |
|
85 | 85 | debugconfig |
|
86 | 86 | debugcreatestreamclonebundle |
|
87 | 87 | debugdag |
|
88 | 88 | debugdata |
|
89 | 89 | debugdate |
|
90 | 90 | debugdeltachain |
|
91 | 91 | debugdirstate |
|
92 | 92 | debugdiscovery |
|
93 | 93 | debugdownload |
|
94 | 94 | debugextensions |
|
95 | 95 | debugfileset |
|
96 | 96 | debugformat |
|
97 | 97 | debugfsinfo |
|
98 | 98 | debuggetbundle |
|
99 | 99 | debugignore |
|
100 | 100 | debugindex |
|
101 | 101 | debugindexdot |
|
102 | 102 | debugindexstats |
|
103 | 103 | debuginstall |
|
104 | 104 | debugknown |
|
105 | 105 | debuglabelcomplete |
|
106 | 106 | debuglocks |
|
107 | 107 | debugmanifestfulltextcache |
|
108 | 108 | debugmergestate |
|
109 | 109 | debugnamecomplete |
|
110 | 110 | debugobsolete |
|
111 | 111 | debugp1copies |
|
112 | 112 | debugp2copies |
|
113 | 113 | debugpathcomplete |
|
114 | 114 | debugpathcopies |
|
115 | 115 | debugpeer |
|
116 | 116 | debugpickmergetool |
|
117 | 117 | debugpushkey |
|
118 | 118 | debugpvec |
|
119 | 119 | debugrebuilddirstate |
|
120 | 120 | debugrebuildfncache |
|
121 | 121 | debugrename |
|
122 | 122 | debugrevlog |
|
123 | 123 | debugrevlogindex |
|
124 | 124 | debugrevspec |
|
125 | 125 | debugserve |
|
126 | 126 | debugsetparents |
|
127 | 127 | debugssl |
|
128 | 128 | debugsub |
|
129 | 129 | debugsuccessorssets |
|
130 | 130 | debugtemplate |
|
131 | 131 | debuguigetpass |
|
132 | 132 | debuguiprompt |
|
133 | 133 | debugupdatecaches |
|
134 | 134 | debugupgraderepo |
|
135 | 135 | debugwalk |
|
136 | 136 | debugwhyunstable |
|
137 | 137 | debugwireargs |
|
138 | 138 | debugwireproto |
|
139 | 139 | |
|
140 | 140 | Do not show the alias of a debug command if there are other candidates |
|
141 | 141 | (this should hide rawcommit) |
|
142 | 142 | $ hg debugcomplete r |
|
143 | 143 | recover |
|
144 | 144 | remove |
|
145 | 145 | rename |
|
146 | 146 | resolve |
|
147 | 147 | revert |
|
148 | 148 | rollback |
|
149 | 149 | root |
|
150 | 150 | Show the alias of a debug command if there are no other candidates |
|
151 | 151 | $ hg debugcomplete rawc |
|
152 | 152 | |
|
153 | 153 | |
|
154 | 154 | Show the global options |
|
155 | 155 | $ hg debugcomplete --options | sort |
|
156 | 156 | --color |
|
157 | 157 | --config |
|
158 | 158 | --cwd |
|
159 | 159 | --debug |
|
160 | 160 | --debugger |
|
161 | 161 | --encoding |
|
162 | 162 | --encodingmode |
|
163 | 163 | --help |
|
164 | 164 | --hidden |
|
165 | 165 | --noninteractive |
|
166 | 166 | --pager |
|
167 | 167 | --profile |
|
168 | 168 | --quiet |
|
169 | 169 | --repository |
|
170 | 170 | --time |
|
171 | 171 | --traceback |
|
172 | 172 | --verbose |
|
173 | 173 | --version |
|
174 | 174 | -R |
|
175 | 175 | -h |
|
176 | 176 | -q |
|
177 | 177 | -v |
|
178 | 178 | -y |
|
179 | 179 | |
|
180 | 180 | Show the options for the "serve" command |
|
181 | 181 | $ hg debugcomplete --options serve | sort |
|
182 | 182 | --accesslog |
|
183 | 183 | --address |
|
184 | 184 | --certificate |
|
185 | 185 | --cmdserver |
|
186 | 186 | --color |
|
187 | 187 | --config |
|
188 | 188 | --cwd |
|
189 | 189 | --daemon |
|
190 | 190 | --daemon-postexec |
|
191 | 191 | --debug |
|
192 | 192 | --debugger |
|
193 | 193 | --encoding |
|
194 | 194 | --encodingmode |
|
195 | 195 | --errorlog |
|
196 | 196 | --help |
|
197 | 197 | --hidden |
|
198 | 198 | --ipv6 |
|
199 | 199 | --name |
|
200 | 200 | --noninteractive |
|
201 | 201 | --pager |
|
202 | 202 | --pid-file |
|
203 | 203 | --port |
|
204 | 204 | --prefix |
|
205 | 205 | --print-url |
|
206 | 206 | --profile |
|
207 | 207 | --quiet |
|
208 | 208 | --repository |
|
209 | 209 | --stdio |
|
210 | 210 | --style |
|
211 | 211 | --subrepos |
|
212 | 212 | --templates |
|
213 | 213 | --time |
|
214 | 214 | --traceback |
|
215 | 215 | --verbose |
|
216 | 216 | --version |
|
217 | 217 | --web-conf |
|
218 | 218 | -6 |
|
219 | 219 | -A |
|
220 | 220 | -E |
|
221 | 221 | -R |
|
222 | 222 | -S |
|
223 | 223 | -a |
|
224 | 224 | -d |
|
225 | 225 | -h |
|
226 | 226 | -n |
|
227 | 227 | -p |
|
228 | 228 | -q |
|
229 | 229 | -t |
|
230 | 230 | -v |
|
231 | 231 | -y |
|
232 | 232 | |
|
233 | 233 | Show an error if we use --options with an ambiguous abbreviation |
|
234 | 234 | $ hg debugcomplete --options s |
|
235 | 235 | hg: command 's' is ambiguous: |
|
236 | 236 | serve shelve showconfig status summary |
|
237 | 237 | [255] |
|
238 | 238 | |
|
239 | 239 | Show all commands + options |
|
240 | 240 | $ hg debugcommands |
|
241 | 241 | abort: dry-run |
|
242 | 242 | add: include, exclude, subrepos, dry-run |
|
243 | 243 | addremove: similarity, subrepos, include, exclude, dry-run |
|
244 | 244 | annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, skip, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, include, exclude, template |
|
245 | 245 | archive: no-decode, prefix, rev, type, subrepos, include, exclude |
|
246 | 246 | backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user |
|
247 | 247 | bisect: reset, good, bad, skip, extend, command, noupdate |
|
248 | 248 | bookmarks: force, rev, delete, rename, inactive, list, template |
|
249 | 249 | branch: force, clean, rev |
|
250 | 250 | branches: active, closed, rev, template |
|
251 | 251 | bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure |
|
252 | 252 | cat: output, rev, decode, include, exclude, template |
|
253 | 253 | clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure |
|
254 | 254 | commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos |
|
255 | 255 | config: untrusted, edit, local, global, template |
|
256 | 256 | continue: dry-run |
|
257 | 257 | copy: after, force, include, exclude, dry-run |
|
258 | 258 | debugancestor: |
|
259 | 259 | debugapplystreamclonebundle: |
|
260 | 260 | debugbuilddag: mergeable-file, overwritten-file, new-file |
|
261 | 261 | debugbundle: all, part-type, spec |
|
262 | 262 | debugcapabilities: |
|
263 | 263 | debugcheckstate: |
|
264 | 264 | debugcolor: style |
|
265 | 265 | debugcommands: |
|
266 | 266 | debugcomplete: options |
|
267 | 267 | debugcreatestreamclonebundle: |
|
268 | 268 | debugdag: tags, branches, dots, spaces |
|
269 | 269 | debugdata: changelog, manifest, dir |
|
270 | 270 | debugdate: extended |
|
271 | 271 | debugdeltachain: changelog, manifest, dir, template |
|
272 | 272 | debugdirstate: nodates, dates, datesort |
|
273 | 273 | debugdiscovery: old, nonheads, rev, seed, ssh, remotecmd, insecure |
|
274 | 274 | debugdownload: output |
|
275 | 275 | debugextensions: template |
|
276 | 276 | debugfileset: rev, all-files, show-matcher, show-stage |
|
277 | 277 | debugformat: template |
|
278 | 278 | debugfsinfo: |
|
279 | 279 | debuggetbundle: head, common, type |
|
280 | 280 | debugignore: |
|
281 | 281 | debugindex: changelog, manifest, dir, template |
|
282 | 282 | debugindexdot: changelog, manifest, dir |
|
283 | 283 | debugindexstats: |
|
284 | 284 | debuginstall: template |
|
285 | 285 | debugknown: |
|
286 | 286 | debuglabelcomplete: |
|
287 | 287 | debuglocks: force-lock, force-wlock, set-lock, set-wlock |
|
288 | 288 | debugmanifestfulltextcache: clear, add |
|
289 | 289 | debugmergestate: |
|
290 | 290 | debugnamecomplete: |
|
291 | 291 | debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template |
|
292 | 292 | debugp1copies: rev |
|
293 | 293 | debugp2copies: rev |
|
294 | 294 | debugpathcomplete: full, normal, added, removed |
|
295 | 295 | debugpathcopies: include, exclude |
|
296 | 296 | debugpeer: |
|
297 | 297 | debugpickmergetool: rev, changedelete, include, exclude, tool |
|
298 | 298 | debugpushkey: |
|
299 | 299 | debugpvec: |
|
300 | 300 | debugrebuilddirstate: rev, minimal |
|
301 | 301 | debugrebuildfncache: |
|
302 | 302 | debugrename: rev |
|
303 | 303 | debugrevlog: changelog, manifest, dir, dump |
|
304 | 304 | debugrevlogindex: changelog, manifest, dir, format |
|
305 | 305 | debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized |
|
306 | 306 | debugserve: sshstdio, logiofd, logiofile |
|
307 | 307 | debugsetparents: |
|
308 | 308 | debugssl: |
|
309 | 309 | debugsub: rev |
|
310 | 310 | debugsuccessorssets: closest |
|
311 | 311 | debugtemplate: rev, define |
|
312 | 312 | debuguigetpass: prompt |
|
313 | 313 | debuguiprompt: prompt |
|
314 | 314 | debugupdatecaches: |
|
315 | debugupgraderepo: optimize, run, backup, manifest | |
|
315 | debugupgraderepo: optimize, run, backup, changelog, manifest | |
|
316 | 316 | debugwalk: include, exclude |
|
317 | 317 | debugwhyunstable: |
|
318 | 318 | debugwireargs: three, four, five, ssh, remotecmd, insecure |
|
319 | 319 | debugwireproto: localssh, peer, noreadstderr, nologhandshake, ssh, remotecmd, insecure |
|
320 | 320 | diff: rev, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos |
|
321 | 321 | export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template |
|
322 | 322 | files: rev, print0, include, exclude, template, subrepos |
|
323 | 323 | forget: interactive, include, exclude, dry-run |
|
324 | 324 | graft: rev, base, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run |
|
325 | 325 | grep: print0, all, diff, text, follow, ignore-case, files-with-matches, line-number, rev, all-files, user, date, template, include, exclude |
|
326 | 326 | heads: rev, topo, active, closed, style, template |
|
327 | 327 | help: extension, command, keyword, system |
|
328 | 328 | identify: rev, num, id, branch, tags, bookmarks, ssh, remotecmd, insecure, template |
|
329 | 329 | import: strip, base, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity |
|
330 | 330 | incoming: force, newest-first, bundle, rev, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos |
|
331 | 331 | init: ssh, remotecmd, insecure |
|
332 | 332 | locate: rev, print0, fullpath, include, exclude |
|
333 | 333 | log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude |
|
334 | 334 | manifest: rev, all, template |
|
335 | 335 | merge: force, rev, preview, abort, tool |
|
336 | 336 | outgoing: force, rev, newest-first, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos |
|
337 | 337 | parents: rev, style, template |
|
338 | 338 | paths: template |
|
339 | 339 | phase: public, draft, secret, force, rev |
|
340 | 340 | pull: update, force, rev, bookmark, branch, ssh, remotecmd, insecure |
|
341 | 341 | push: force, rev, bookmark, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure |
|
342 | 342 | recover: verify |
|
343 | 343 | remove: after, force, subrepos, include, exclude, dry-run |
|
344 | 344 | rename: after, force, include, exclude, dry-run |
|
345 | 345 | resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template |
|
346 | 346 | revert: all, date, rev, no-backup, interactive, include, exclude, dry-run |
|
347 | 347 | rollback: dry-run, force |
|
348 | 348 | root: template |
|
349 | 349 | serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, print-url, subrepos |
|
350 | 350 | shelve: addremove, unknown, cleanup, date, delete, edit, keep, list, message, name, patch, interactive, stat, include, exclude |
|
351 | 351 | status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, terse, copies, print0, rev, change, include, exclude, subrepos, template |
|
352 | 352 | summary: remote |
|
353 | 353 | tag: force, local, rev, remove, edit, message, date, user |
|
354 | 354 | tags: template |
|
355 | 355 | tip: patch, git, style, template |
|
356 | 356 | unbundle: update |
|
357 | 357 | unshelve: abort, continue, interactive, keep, name, tool, date |
|
358 | 358 | update: clean, check, merge, date, rev, tool |
|
359 | 359 | verify: full |
|
360 | 360 | version: template |
|
361 | 361 | |
|
362 | 362 | $ hg init a |
|
363 | 363 | $ cd a |
|
364 | 364 | $ echo fee > fee |
|
365 | 365 | $ hg ci -q -Amfee |
|
366 | 366 | $ hg tag fee |
|
367 | 367 | $ mkdir fie |
|
368 | 368 | $ echo dead > fie/dead |
|
369 | 369 | $ echo live > fie/live |
|
370 | 370 | $ hg bookmark fo |
|
371 | 371 | $ hg branch -q fie |
|
372 | 372 | $ hg ci -q -Amfie |
|
373 | 373 | $ echo fo > fo |
|
374 | 374 | $ hg branch -qf default |
|
375 | 375 | $ hg ci -q -Amfo |
|
376 | 376 | $ echo Fum > Fum |
|
377 | 377 | $ hg ci -q -AmFum |
|
378 | 378 | $ hg bookmark Fum |
|
379 | 379 | |
|
380 | 380 | Test debugpathcomplete |
|
381 | 381 | |
|
382 | 382 | $ hg debugpathcomplete f |
|
383 | 383 | fee |
|
384 | 384 | fie |
|
385 | 385 | fo |
|
386 | 386 | $ hg debugpathcomplete -f f |
|
387 | 387 | fee |
|
388 | 388 | fie/dead |
|
389 | 389 | fie/live |
|
390 | 390 | fo |
|
391 | 391 | |
|
392 | 392 | $ hg rm Fum |
|
393 | 393 | $ hg debugpathcomplete -r F |
|
394 | 394 | Fum |
|
395 | 395 | |
|
396 | 396 | Test debugnamecomplete |
|
397 | 397 | |
|
398 | 398 | $ hg debugnamecomplete |
|
399 | 399 | Fum |
|
400 | 400 | default |
|
401 | 401 | fee |
|
402 | 402 | fie |
|
403 | 403 | fo |
|
404 | 404 | tip |
|
405 | 405 | $ hg debugnamecomplete f |
|
406 | 406 | fee |
|
407 | 407 | fie |
|
408 | 408 | fo |
|
409 | 409 | |
|
410 | 410 | Test debuglabelcomplete, a deprecated name for debugnamecomplete that is still |
|
411 | 411 | used for completions in some shells. |
|
412 | 412 | |
|
413 | 413 | $ hg debuglabelcomplete |
|
414 | 414 | Fum |
|
415 | 415 | default |
|
416 | 416 | fee |
|
417 | 417 | fie |
|
418 | 418 | fo |
|
419 | 419 | tip |
|
420 | 420 | $ hg debuglabelcomplete f |
|
421 | 421 | fee |
|
422 | 422 | fie |
|
423 | 423 | fo |
@@ -1,1048 +1,1140 | |||
|
1 | 1 | #require no-reposimplestore |
|
2 | 2 | |
|
3 | 3 | $ cat >> $HGRCPATH << EOF |
|
4 | 4 | > [extensions] |
|
5 | 5 | > share = |
|
6 | 6 | > EOF |
|
7 | 7 | |
|
8 | 8 | store and revlogv1 are required in source |
|
9 | 9 | |
|
10 | 10 | $ hg --config format.usestore=false init no-store |
|
11 | 11 | $ hg -R no-store debugupgraderepo |
|
12 | 12 | abort: cannot upgrade repository; requirement missing: store |
|
13 | 13 | [255] |
|
14 | 14 | |
|
15 | 15 | $ hg init no-revlogv1 |
|
16 | 16 | $ cat > no-revlogv1/.hg/requires << EOF |
|
17 | 17 | > dotencode |
|
18 | 18 | > fncache |
|
19 | 19 | > generaldelta |
|
20 | 20 | > store |
|
21 | 21 | > EOF |
|
22 | 22 | |
|
23 | 23 | $ hg -R no-revlogv1 debugupgraderepo |
|
24 | 24 | abort: cannot upgrade repository; requirement missing: revlogv1 |
|
25 | 25 | [255] |
|
26 | 26 | |
|
27 | 27 | Cannot upgrade shared repositories |
|
28 | 28 | |
|
29 | 29 | $ hg init share-parent |
|
30 | 30 | $ hg -q share share-parent share-child |
|
31 | 31 | |
|
32 | 32 | $ hg -R share-child debugupgraderepo |
|
33 | 33 | abort: cannot upgrade repository; unsupported source requirement: shared |
|
34 | 34 | [255] |
|
35 | 35 | |
|
36 | 36 | Do not yet support upgrading treemanifest repos |
|
37 | 37 | |
|
38 | 38 | $ hg --config experimental.treemanifest=true init treemanifest |
|
39 | 39 | $ hg -R treemanifest debugupgraderepo |
|
40 | 40 | abort: cannot upgrade repository; unsupported source requirement: treemanifest |
|
41 | 41 | [255] |
|
42 | 42 | |
|
43 | 43 | Cannot add treemanifest requirement during upgrade |
|
44 | 44 | |
|
45 | 45 | $ hg init disallowaddedreq |
|
46 | 46 | $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo |
|
47 | 47 | abort: cannot upgrade repository; do not support adding requirement: treemanifest |
|
48 | 48 | [255] |
|
49 | 49 | |
|
50 | 50 | An upgrade of a repository created with recommended settings only suggests optimizations |
|
51 | 51 | |
|
52 | 52 | $ hg init empty |
|
53 | 53 | $ cd empty |
|
54 | 54 | $ hg debugformat |
|
55 | 55 | format-variant repo |
|
56 | 56 | fncache: yes |
|
57 | 57 | dotencode: yes |
|
58 | 58 | generaldelta: yes |
|
59 | 59 | sparserevlog: yes |
|
60 | 60 | plain-cl-delta: yes |
|
61 | 61 | compression: zlib |
|
62 | 62 | compression-level: default |
|
63 | 63 | $ hg debugformat --verbose |
|
64 | 64 | format-variant repo config default |
|
65 | 65 | fncache: yes yes yes |
|
66 | 66 | dotencode: yes yes yes |
|
67 | 67 | generaldelta: yes yes yes |
|
68 | 68 | sparserevlog: yes yes yes |
|
69 | 69 | plain-cl-delta: yes yes yes |
|
70 | 70 | compression: zlib zlib zlib |
|
71 | 71 | compression-level: default default default |
|
72 | 72 | $ hg debugformat --verbose --config format.usefncache=no |
|
73 | 73 | format-variant repo config default |
|
74 | 74 | fncache: yes no yes |
|
75 | 75 | dotencode: yes no yes |
|
76 | 76 | generaldelta: yes yes yes |
|
77 | 77 | sparserevlog: yes yes yes |
|
78 | 78 | plain-cl-delta: yes yes yes |
|
79 | 79 | compression: zlib zlib zlib |
|
80 | 80 | compression-level: default default default |
|
81 | 81 | $ hg debugformat --verbose --config format.usefncache=no --color=debug |
|
82 | 82 | format-variant repo config default |
|
83 | 83 | [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes] |
|
84 | 84 | [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes] |
|
85 | 85 | [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes] |
|
86 | 86 | [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes] |
|
87 | 87 | [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes] |
|
88 | 88 | [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] |
|
89 | 89 | [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default] |
|
90 | 90 | $ hg debugformat -Tjson |
|
91 | 91 | [ |
|
92 | 92 | { |
|
93 | 93 | "config": true, |
|
94 | 94 | "default": true, |
|
95 | 95 | "name": "fncache", |
|
96 | 96 | "repo": true |
|
97 | 97 | }, |
|
98 | 98 | { |
|
99 | 99 | "config": true, |
|
100 | 100 | "default": true, |
|
101 | 101 | "name": "dotencode", |
|
102 | 102 | "repo": true |
|
103 | 103 | }, |
|
104 | 104 | { |
|
105 | 105 | "config": true, |
|
106 | 106 | "default": true, |
|
107 | 107 | "name": "generaldelta", |
|
108 | 108 | "repo": true |
|
109 | 109 | }, |
|
110 | 110 | { |
|
111 | 111 | "config": true, |
|
112 | 112 | "default": true, |
|
113 | 113 | "name": "sparserevlog", |
|
114 | 114 | "repo": true |
|
115 | 115 | }, |
|
116 | 116 | { |
|
117 | 117 | "config": true, |
|
118 | 118 | "default": true, |
|
119 | 119 | "name": "plain-cl-delta", |
|
120 | 120 | "repo": true |
|
121 | 121 | }, |
|
122 | 122 | { |
|
123 | 123 | "config": "zlib", |
|
124 | 124 | "default": "zlib", |
|
125 | 125 | "name": "compression", |
|
126 | 126 | "repo": "zlib" |
|
127 | 127 | }, |
|
128 | 128 | { |
|
129 | 129 | "config": "default", |
|
130 | 130 | "default": "default", |
|
131 | 131 | "name": "compression-level", |
|
132 | 132 | "repo": "default" |
|
133 | 133 | } |
|
134 | 134 | ] |
|
135 | 135 | $ hg debugupgraderepo |
|
136 | 136 | (no feature deficiencies found in existing repository) |
|
137 | 137 | performing an upgrade with "--run" will make the following changes: |
|
138 | 138 | |
|
139 | 139 | requirements |
|
140 | 140 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store |
|
141 | 141 | |
|
142 | 142 | additional optimizations are available by specifying "--optimize <name>": |
|
143 | 143 | |
|
144 | 144 | re-delta-parent |
|
145 | 145 | deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower |
|
146 | 146 | |
|
147 | 147 | re-delta-multibase |
|
148 | 148 | deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges |
|
149 | 149 | |
|
150 | 150 | re-delta-all |
|
151 | 151 | deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed |
|
152 | 152 | |
|
153 | 153 | re-delta-fulladd |
|
154 | 154 | every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved. |
|
155 | 155 | |
|
156 | 156 | |
|
157 | 157 | --optimize can be used to add optimizations |
|
158 | 158 | |
|
159 | 159 | $ hg debugupgrade --optimize redeltaparent |
|
160 | 160 | (no feature deficiencies found in existing repository) |
|
161 | 161 | performing an upgrade with "--run" will make the following changes: |
|
162 | 162 | |
|
163 | 163 | requirements |
|
164 | 164 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store |
|
165 | 165 | |
|
166 | 166 | re-delta-parent |
|
167 | 167 | deltas within internal storage will choose a new base revision if needed |
|
168 | 168 | |
|
169 | 169 | additional optimizations are available by specifying "--optimize <name>": |
|
170 | 170 | |
|
171 | 171 | re-delta-multibase |
|
172 | 172 | deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges |
|
173 | 173 | |
|
174 | 174 | re-delta-all |
|
175 | 175 | deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed |
|
176 | 176 | |
|
177 | 177 | re-delta-fulladd |
|
178 | 178 | every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved. |
|
179 | 179 | |
|
180 | 180 | |
|
181 | 181 | modern form of the option |
|
182 | 182 | |
|
183 | 183 | $ hg debugupgrade --optimize re-delta-parent |
|
184 | 184 | (no feature deficiencies found in existing repository) |
|
185 | 185 | performing an upgrade with "--run" will make the following changes: |
|
186 | 186 | |
|
187 | 187 | requirements |
|
188 | 188 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store |
|
189 | 189 | |
|
190 | 190 | re-delta-parent |
|
191 | 191 | deltas within internal storage will choose a new base revision if needed |
|
192 | 192 | |
|
193 | 193 | additional optimizations are available by specifying "--optimize <name>": |
|
194 | 194 | |
|
195 | 195 | re-delta-multibase |
|
196 | 196 | deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges |
|
197 | 197 | |
|
198 | 198 | re-delta-all |
|
199 | 199 | deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed |
|
200 | 200 | |
|
201 | 201 | re-delta-fulladd |
|
202 | 202 | every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved. |
|
203 | 203 | |
|
204 | 204 | |
|
205 | 205 | unknown optimization: |
|
206 | 206 | |
|
207 | 207 | $ hg debugupgrade --optimize foobar |
|
208 | 208 | abort: unknown optimization action requested: foobar |
|
209 | 209 | (run without arguments to see valid optimizations) |
|
210 | 210 | [255] |
|
211 | 211 | |
|
212 | 212 | Various sub-optimal detections work |
|
213 | 213 | |
|
214 | 214 | $ cat > .hg/requires << EOF |
|
215 | 215 | > revlogv1 |
|
216 | 216 | > store |
|
217 | 217 | > EOF |
|
218 | 218 | |
|
219 | 219 | $ hg debugformat |
|
220 | 220 | format-variant repo |
|
221 | 221 | fncache: no |
|
222 | 222 | dotencode: no |
|
223 | 223 | generaldelta: no |
|
224 | 224 | sparserevlog: no |
|
225 | 225 | plain-cl-delta: yes |
|
226 | 226 | compression: zlib |
|
227 | 227 | compression-level: default |
|
228 | 228 | $ hg debugformat --verbose |
|
229 | 229 | format-variant repo config default |
|
230 | 230 | fncache: no yes yes |
|
231 | 231 | dotencode: no yes yes |
|
232 | 232 | generaldelta: no yes yes |
|
233 | 233 | sparserevlog: no yes yes |
|
234 | 234 | plain-cl-delta: yes yes yes |
|
235 | 235 | compression: zlib zlib zlib |
|
236 | 236 | compression-level: default default default |
|
237 | 237 | $ hg debugformat --verbose --config format.usegeneraldelta=no |
|
238 | 238 | format-variant repo config default |
|
239 | 239 | fncache: no yes yes |
|
240 | 240 | dotencode: no yes yes |
|
241 | 241 | generaldelta: no no yes |
|
242 | 242 | sparserevlog: no no yes |
|
243 | 243 | plain-cl-delta: yes yes yes |
|
244 | 244 | compression: zlib zlib zlib |
|
245 | 245 | compression-level: default default default |
|
246 | 246 | $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug |
|
247 | 247 | format-variant repo config default |
|
248 | 248 | [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes] |
|
249 | 249 | [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes] |
|
250 | 250 | [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes] |
|
251 | 251 | [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes] |
|
252 | 252 | [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes] |
|
253 | 253 | [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib] |
|
254 | 254 | [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default] |
|
255 | 255 | $ hg debugupgraderepo |
|
256 | 256 | repository lacks features recommended by current config options: |
|
257 | 257 | |
|
258 | 258 | fncache |
|
259 | 259 | long and reserved filenames may not work correctly; repository performance is sub-optimal |
|
260 | 260 | |
|
261 | 261 | dotencode |
|
262 | 262 | storage of filenames beginning with a period or space may not work correctly |
|
263 | 263 | |
|
264 | 264 | generaldelta |
|
265 | 265 | deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower |
|
266 | 266 | |
|
267 | 267 | sparserevlog |
|
268 | 268 | in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange. |
|
269 | 269 | |
|
270 | 270 | |
|
271 | 271 | performing an upgrade with "--run" will make the following changes: |
|
272 | 272 | |
|
273 | 273 | requirements |
|
274 | 274 | preserved: revlogv1, store |
|
275 | 275 | added: dotencode, fncache, generaldelta, sparserevlog |
|
276 | 276 | |
|
277 | 277 | fncache |
|
278 | 278 | repository will be more resilient to storing certain paths and performance of certain operations should be improved |
|
279 | 279 | |
|
280 | 280 | dotencode |
|
281 | 281 | repository will be better able to store files beginning with a space or period |
|
282 | 282 | |
|
283 | 283 | generaldelta |
|
284 | 284 | repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster |
|
285 | 285 | |
|
286 | 286 | sparserevlog |
|
287 | 287 | Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server. |
|
288 | 288 | |
|
289 | 289 | additional optimizations are available by specifying "--optimize <name>": |
|
290 | 290 | |
|
291 | 291 | re-delta-parent |
|
292 | 292 | deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower |
|
293 | 293 | |
|
294 | 294 | re-delta-multibase |
|
295 | 295 | deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges |
|
296 | 296 | |
|
297 | 297 | re-delta-all |
|
298 | 298 | deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed |
|
299 | 299 | |
|
300 | 300 | re-delta-fulladd |
|
301 | 301 | every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved. |
|
302 | 302 | |
|
303 | 303 | |
|
304 | 304 | $ hg --config format.dotencode=false debugupgraderepo |
|
305 | 305 | repository lacks features recommended by current config options: |
|
306 | 306 | |
|
307 | 307 | fncache |
|
308 | 308 | long and reserved filenames may not work correctly; repository performance is sub-optimal |
|
309 | 309 | |
|
310 | 310 | generaldelta |
|
311 | 311 | deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower |
|
312 | 312 | |
|
313 | 313 | sparserevlog |
|
314 | 314 | in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange. |
|
315 | 315 | |
|
316 | 316 | repository lacks features used by the default config options: |
|
317 | 317 | |
|
318 | 318 | dotencode |
|
319 | 319 | storage of filenames beginning with a period or space may not work correctly |
|
320 | 320 | |
|
321 | 321 | |
|
322 | 322 | performing an upgrade with "--run" will make the following changes: |
|
323 | 323 | |
|
324 | 324 | requirements |
|
325 | 325 | preserved: revlogv1, store |
|
326 | 326 | added: fncache, generaldelta, sparserevlog |
|
327 | 327 | |
|
328 | 328 | fncache |
|
329 | 329 | repository will be more resilient to storing certain paths and performance of certain operations should be improved |
|
330 | 330 | |
|
331 | 331 | generaldelta |
|
332 | 332 | repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster |
|
333 | 333 | |
|
334 | 334 | sparserevlog |
|
335 | 335 | Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server. |
|
336 | 336 | |
|
337 | 337 | additional optimizations are available by specifying "--optimize <name>": |
|
338 | 338 | |
|
339 | 339 | re-delta-parent |
|
340 | 340 | deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower |
|
341 | 341 | |
|
342 | 342 | re-delta-multibase |
|
343 | 343 | deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges |
|
344 | 344 | |
|
345 | 345 | re-delta-all |
|
346 | 346 | deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed |
|
347 | 347 | |
|
348 | 348 | re-delta-fulladd |
|
349 | 349 | every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved. |
|
350 | 350 | |
|
351 | 351 | |
|
352 | 352 | $ cd .. |
|
353 | 353 | |
|
354 | 354 | Upgrading a repository that is already modern essentially no-ops |
|
355 | 355 | |
|
356 | 356 | $ hg init modern |
|
357 | 357 | $ hg -R modern debugupgraderepo --run |
|
358 | 358 | upgrade will perform the following actions: |
|
359 | 359 | |
|
360 | 360 | requirements |
|
361 | 361 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store |
|
362 | 362 | |
|
363 | 363 | beginning upgrade... |
|
364 | 364 | repository locked and read-only |
|
365 | 365 | creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob) |
|
366 | 366 | (it is safe to interrupt this process any time before data migration completes) |
|
367 | 367 | data fully migrated to temporary repository |
|
368 | 368 | marking source repository as being upgraded; clients will be unable to read from repository |
|
369 | 369 | starting in-place swap of repository data |
|
370 | 370 | replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob) |
|
371 | 371 | replacing store... |
|
372 | 372 | store replacement complete; repository was inconsistent for *s (glob) |
|
373 | 373 | finalizing requirements file and making repository readable again |
|
374 | 374 | removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob) |
|
375 | 375 | copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob) |
|
376 | 376 | the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified |
|
377 | 377 | |
|
378 | 378 | Upgrading a repository to generaldelta works |
|
379 | 379 | |
|
380 | 380 | $ hg --config format.usegeneraldelta=false init upgradegd |
|
381 | 381 | $ cd upgradegd |
|
382 | 382 | $ touch f0 |
|
383 | 383 | $ hg -q commit -A -m initial |
|
384 | 384 | $ touch f1 |
|
385 | 385 | $ hg -q commit -A -m 'add f1' |
|
386 | 386 | $ hg -q up -r 0 |
|
387 | 387 | $ touch f2 |
|
388 | 388 | $ hg -q commit -A -m 'add f2' |
|
389 | 389 | |
|
390 | 390 | $ hg debugupgraderepo --run --config format.sparse-revlog=false |
|
391 | 391 | upgrade will perform the following actions: |
|
392 | 392 | |
|
393 | 393 | requirements |
|
394 | 394 | preserved: dotencode, fncache, revlogv1, store |
|
395 | 395 | added: generaldelta |
|
396 | 396 | |
|
397 | 397 | generaldelta |
|
398 | 398 | repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster |
|
399 | 399 | |
|
400 | 400 | beginning upgrade... |
|
401 | 401 | repository locked and read-only |
|
402 | 402 | creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob) |
|
403 | 403 | (it is safe to interrupt this process any time before data migration completes) |
|
404 | 404 | migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog) |
|
405 | 405 | migrating 917 bytes in store; 401 bytes tracked data |
|
406 | 406 | migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data) |
|
407 | 407 | finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes |
|
408 | 408 | migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data) |
|
409 | 409 | finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes |
|
410 | 410 | migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data) |
|
411 | 411 | finished migrating 3 changelog revisions; change in size: 0 bytes |
|
412 | 412 | finished migrating 9 total revisions; total change in store size: 0 bytes |
|
413 | 413 | copying phaseroots |
|
414 | 414 | data fully migrated to temporary repository |
|
415 | 415 | marking source repository as being upgraded; clients will be unable to read from repository |
|
416 | 416 | starting in-place swap of repository data |
|
417 | 417 | replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob) |
|
418 | 418 | replacing store... |
|
419 | 419 | store replacement complete; repository was inconsistent for *s (glob) |
|
420 | 420 | finalizing requirements file and making repository readable again |
|
421 | 421 | removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob) |
|
422 | 422 | copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob) |
|
423 | 423 | the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified |
|
424 | 424 | |
|
425 | 425 | Original requirements backed up |
|
426 | 426 | |
|
427 | 427 | $ cat .hg/upgradebackup.*/requires |
|
428 | 428 | dotencode |
|
429 | 429 | fncache |
|
430 | 430 | revlogv1 |
|
431 | 431 | store |
|
432 | 432 | |
|
433 | 433 | generaldelta added to the original requirements file
|
434 | 434 | |
|
435 | 435 | $ cat .hg/requires |
|
436 | 436 | dotencode |
|
437 | 437 | fncache |
|
438 | 438 | generaldelta |
|
439 | 439 | revlogv1 |
|
440 | 440 | store |
|
441 | 441 | |
|
442 | 442 | store directory has files we expect |
|
443 | 443 | |
|
444 | 444 | $ ls .hg/store |
|
445 | 445 | 00changelog.i |
|
446 | 446 | 00manifest.i |
|
447 | 447 | data |
|
448 | 448 | fncache |
|
449 | 449 | phaseroots |
|
450 | 450 | undo |
|
451 | 451 | undo.backupfiles |
|
452 | 452 | undo.phaseroots |
|
453 | 453 | |
|
454 | 454 | manifest should be generaldelta |
|
455 | 455 | |
|
456 | 456 | $ hg debugrevlog -m | grep flags |
|
457 | 457 | flags : inline, generaldelta |
|
458 | 458 | |
|
459 | 459 | verify should be happy |
|
460 | 460 | |
|
461 | 461 | $ hg verify |
|
462 | 462 | checking changesets |
|
463 | 463 | checking manifests |
|
464 | 464 | crosschecking files in changesets and manifests |
|
465 | 465 | checking files |
|
466 | 466 | checked 3 changesets with 3 changes to 3 files |
|
467 | 467 | |
|
468 | 468 | old store should be backed up |
|
469 | 469 | |
|
470 | 470 | $ ls -d .hg/upgradebackup.*/ |
|
471 | 471 | .hg/upgradebackup.*/ (glob) |
|
472 | 472 | $ ls .hg/upgradebackup.*/store |
|
473 | 473 | 00changelog.i |
|
474 | 474 | 00manifest.i |
|
475 | 475 | data |
|
476 | 476 | fncache |
|
477 | 477 | phaseroots |
|
478 | 478 | undo |
|
479 | 479 | undo.backup.fncache |
|
480 | 480 | undo.backupfiles |
|
481 | 481 | undo.phaseroots |
|
482 | 482 | |
|
483 | 483 | unless --no-backup is passed |
|
484 | 484 | |
|
485 | 485 | $ rm -rf .hg/upgradebackup.*/ |
|
486 | 486 | $ hg debugupgraderepo --run --no-backup |
|
487 | 487 | upgrade will perform the following actions: |
|
488 | 488 | |
|
489 | 489 | requirements |
|
490 | 490 | preserved: dotencode, fncache, generaldelta, revlogv1, store |
|
491 | 491 | added: sparserevlog |
|
492 | 492 | |
|
493 | 493 | sparserevlog |
|
494 | 494 | Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server. |
|
495 | 495 | |
|
496 | 496 | beginning upgrade... |
|
497 | 497 | repository locked and read-only |
|
498 | 498 | creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob) |
|
499 | 499 | (it is safe to interrupt this process any time before data migration completes) |
|
500 | 500 | migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog) |
|
501 | 501 | migrating 917 bytes in store; 401 bytes tracked data |
|
502 | 502 | migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data) |
|
503 | 503 | finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes |
|
504 | 504 | migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data) |
|
505 | 505 | finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes |
|
506 | 506 | migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data) |
|
507 | 507 | finished migrating 3 changelog revisions; change in size: 0 bytes |
|
508 | 508 | finished migrating 9 total revisions; total change in store size: 0 bytes |
|
509 | 509 | copying phaseroots |
|
510 | 510 | data fully migrated to temporary repository |
|
511 | 511 | marking source repository as being upgraded; clients will be unable to read from repository |
|
512 | 512 | starting in-place swap of repository data |
|
513 | 513 | replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob) |
|
514 | 514 | replacing store... |
|
515 | 515 | store replacement complete; repository was inconsistent for * (glob) |
|
516 | 516 | finalizing requirements file and making repository readable again |
|
517 | 517 | removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob) |
|
518 | 518 | removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob) |
|
519 | 519 | $ ls -1 .hg/ | grep upgradebackup |
|
520 | 520 | [1] |
|
521 | 521 | |
|
522 | 522 | We can restrict optimization to some revlogs:
|
523 | 523 | |
|
524 | 524 | $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback |
|
525 | 525 | upgrade will perform the following actions: |
|
526 | 526 | |
|
527 | 527 | requirements |
|
528 | 528 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store |
|
529 | 529 | |
|
530 | 530 | re-delta-parent |
|
531 | 531 | deltas within internal storage will choose a new base revision if needed |
|
532 | 532 | |
|
533 | 533 | beginning upgrade... |
|
534 | 534 | repository locked and read-only |
|
535 | 535 | creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob) |
|
536 | 536 | (it is safe to interrupt this process any time before data migration completes) |
|
537 | 537 | migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog) |
|
538 | 538 | migrating 917 bytes in store; 401 bytes tracked data |
|
539 | 539 | migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data) |
|
540 | 540 | blindly copying data/f0.i containing 1 revisions |
|
541 | 541 | blindly copying data/f1.i containing 1 revisions |
|
542 | 542 | blindly copying data/f2.i containing 1 revisions |
|
543 | 543 | finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes |
|
544 | 544 | migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data) |
|
545 | 545 | cloning 3 revisions from 00manifest.i |
|
546 | 546 | finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes |
|
547 | 547 | migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data) |
|
548 | 548 | blindly copying 00changelog.i containing 3 revisions |
|
549 | 549 | finished migrating 3 changelog revisions; change in size: 0 bytes |
|
550 | 550 | finished migrating 9 total revisions; total change in store size: 0 bytes |
|
551 | 551 | copying phaseroots |
|
552 | 552 | data fully migrated to temporary repository |
|
553 | 553 | marking source repository as being upgraded; clients will be unable to read from repository |
|
554 | 554 | starting in-place swap of repository data |
|
555 | 555 | replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob) |
|
556 | 556 | replacing store... |
|
557 | 557 | store replacement complete; repository was inconsistent for *s (glob) |
|
558 | 558 | finalizing requirements file and making repository readable again |
|
559 | 559 | removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob) |
|
560 | 560 | removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob) |
|
561 | 561 | |
|
562 | 562 | Check that the repo still works fine |
|
563 | 563 | |
|
564 | 564 | $ hg log -G --patch |
|
565 | 565 | @ changeset: 2:b5a3b78015e5 |
|
566 | 566 | | tag: tip |
|
567 | 567 | | parent: 0:ba592bf28da2 |
|
568 | 568 | | user: test |
|
569 | 569 | | date: Thu Jan 01 00:00:00 1970 +0000 |
|
570 | 570 | | summary: add f2 |
|
571 | 571 | | |
|
572 | 572 | | |
|
573 | 573 | | o changeset: 1:da8c0fc4833c |
|
574 | 574 | |/ user: test |
|
575 | 575 | | date: Thu Jan 01 00:00:00 1970 +0000 |
|
576 | 576 | | summary: add f1 |
|
577 | 577 | | |
|
578 | 578 | | |
|
579 | 579 | o changeset: 0:ba592bf28da2 |
|
580 | 580 | user: test |
|
581 | 581 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
582 | 582 | summary: initial |
|
583 | 583 | |
|
584 | 584 | |
|
585 | 585 | |
|
586 | 586 | $ hg verify |
|
587 | 587 | checking changesets |
|
588 | 588 | checking manifests |
|
589 | 589 | crosschecking files in changesets and manifests |
|
590 | 590 | checking files |
|
591 | 591 | checked 3 changesets with 3 changes to 3 files |
|
592 | 592 | |
|
593 | 593 | Check we can select negatively |
|
594 | 594 | |
|
595 | 595 | $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback |
|
596 | 596 | upgrade will perform the following actions: |
|
597 | 597 | |
|
598 | 598 | requirements |
|
599 | 599 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store |
|
600 | 600 | |
|
601 | 601 | re-delta-parent |
|
602 | 602 | deltas within internal storage will choose a new base revision if needed |
|
603 | 603 | |
|
604 | 604 | beginning upgrade... |
|
605 | 605 | repository locked and read-only |
|
606 | 606 | creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob) |
|
607 | 607 | (it is safe to interrupt this process any time before data migration completes) |
|
608 | 608 | migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog) |
|
609 | 609 | migrating 917 bytes in store; 401 bytes tracked data |
|
610 | 610 | migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data) |
|
611 | 611 | cloning 1 revisions from data/f0.i |
|
612 | 612 | cloning 1 revisions from data/f1.i |
|
613 | 613 | cloning 1 revisions from data/f2.i |
|
614 | 614 | finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes |
|
615 | 615 | migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data) |
|
616 | 616 | blindly copying 00manifest.i containing 3 revisions |
|
617 | 617 | finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes |
|
618 | 618 | migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data) |
|
619 | 619 | cloning 3 revisions from 00changelog.i |
|
620 | 620 | finished migrating 3 changelog revisions; change in size: 0 bytes |
|
621 | 621 | finished migrating 9 total revisions; total change in store size: 0 bytes |
|
622 | 622 | copying phaseroots |
|
623 | 623 | data fully migrated to temporary repository |
|
624 | 624 | marking source repository as being upgraded; clients will be unable to read from repository |
|
625 | 625 | starting in-place swap of repository data |
|
626 | 626 | replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob) |
|
627 | 627 | replacing store... |
|
628 | 628 | store replacement complete; repository was inconsistent for *s (glob) |
|
629 | 629 | finalizing requirements file and making repository readable again |
|
630 | 630 | removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob) |
|
631 | 631 | removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob) |
|
632 | 632 | $ hg verify |
|
633 | 633 | checking changesets |
|
634 | 634 | checking manifests |
|
635 | 635 | crosschecking files in changesets and manifests |
|
636 | 636 | checking files |
|
637 | 637 | checked 3 changesets with 3 changes to 3 files |
|
638 | 638 | |
|
639 | Check that we can select changelog only | |
|
640 | ||
|
641 | $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback | |
|
642 | upgrade will perform the following actions: | |
|
643 | ||
|
644 | requirements | |
|
645 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store | |
|
646 | ||
|
647 | re-delta-parent | |
|
648 | deltas within internal storage will choose a new base revision if needed | |
|
649 | ||
|
650 | beginning upgrade... | |
|
651 | repository locked and read-only | |
|
652 | creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob) | |
|
653 | (it is safe to interrupt this process any time before data migration completes) | |
|
654 | migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog) | |
|
655 | migrating 917 bytes in store; 401 bytes tracked data | |
|
656 | migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data) | |
|
657 | blindly copying data/f0.i containing 1 revisions | |
|
658 | blindly copying data/f1.i containing 1 revisions | |
|
659 | blindly copying data/f2.i containing 1 revisions | |
|
660 | finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes | |
|
661 | migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data) | |
|
662 | blindly copying 00manifest.i containing 3 revisions | |
|
663 | finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes | |
|
664 | migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data) | |
|
665 | cloning 3 revisions from 00changelog.i | |
|
666 | finished migrating 3 changelog revisions; change in size: 0 bytes | |
|
667 | finished migrating 9 total revisions; total change in store size: 0 bytes | |
|
668 | copying phaseroots | |
|
669 | data fully migrated to temporary repository | |
|
670 | marking source repository as being upgraded; clients will be unable to read from repository | |
|
671 | starting in-place swap of repository data | |
|
672 | replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob) | |
|
673 | replacing store... | |
|
674 | store replacement complete; repository was inconsistent for *s (glob) | |
|
675 | finalizing requirements file and making repository readable again | |
|
676 | removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob) | |
|
677 | removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob) | |
|
678 | $ hg verify | |
|
679 | checking changesets | |
|
680 | checking manifests | |
|
681 | crosschecking files in changesets and manifests | |
|
682 | checking files | |
|
683 | checked 3 changesets with 3 changes to 3 files | |
|
684 | ||
|
685 | Check that we can select filelog only | |
|
686 | ||
|
687 | $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback | |
|
688 | upgrade will perform the following actions: | |
|
689 | ||
|
690 | requirements | |
|
691 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store | |
|
692 | ||
|
693 | re-delta-parent | |
|
694 | deltas within internal storage will choose a new base revision if needed | |
|
695 | ||
|
696 | beginning upgrade... | |
|
697 | repository locked and read-only | |
|
698 | creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob) | |
|
699 | (it is safe to interrupt this process any time before data migration completes) | |
|
700 | migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog) | |
|
701 | migrating 917 bytes in store; 401 bytes tracked data | |
|
702 | migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data) | |
|
703 | cloning 1 revisions from data/f0.i | |
|
704 | cloning 1 revisions from data/f1.i | |
|
705 | cloning 1 revisions from data/f2.i | |
|
706 | finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes | |
|
707 | migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data) | |
|
708 | blindly copying 00manifest.i containing 3 revisions | |
|
709 | finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes | |
|
710 | migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data) | |
|
711 | blindly copying 00changelog.i containing 3 revisions | |
|
712 | finished migrating 3 changelog revisions; change in size: 0 bytes | |
|
713 | finished migrating 9 total revisions; total change in store size: 0 bytes | |
|
714 | copying phaseroots | |
|
715 | data fully migrated to temporary repository | |
|
716 | marking source repository as being upgraded; clients will be unable to read from repository | |
|
717 | starting in-place swap of repository data | |
|
718 | replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob) | |
|
719 | replacing store... | |
|
720 | store replacement complete; repository was inconsistent for *s (glob) | |
|
721 | finalizing requirements file and making repository readable again | |
|
722 | removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob) | |
|
723 | removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob) | |
|
724 | $ hg verify | |
|
725 | checking changesets | |
|
726 | checking manifests | |
|
727 | crosschecking files in changesets and manifests | |
|
728 | checking files | |
|
729 | checked 3 changesets with 3 changes to 3 files | |
|
730 | ||
|
639 | 731 | $ cd .. |
|
640 | 732 | |
|
641 | 733 | store files with special filenames aren't encoded during copy |
|
642 | 734 | |
|
643 | 735 | $ hg init store-filenames |
|
644 | 736 | $ cd store-filenames |
|
645 | 737 | $ touch foo |
|
646 | 738 | $ hg -q commit -A -m initial |
|
647 | 739 | $ touch .hg/store/.XX_special_filename |
|
648 | 740 | |
|
649 | 741 | $ hg debugupgraderepo --run |
|
650 | 742 | upgrade will perform the following actions: |
|
651 | 743 | |
|
652 | 744 | requirements |
|
653 | 745 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store |
|
654 | 746 | |
|
655 | 747 | beginning upgrade... |
|
656 | 748 | repository locked and read-only |
|
657 | 749 | creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob) |
|
658 | 750 | (it is safe to interrupt this process any time before data migration completes) |
|
659 | 751 | migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog) |
|
660 | 752 | migrating 301 bytes in store; 107 bytes tracked data |
|
661 | 753 | migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data) |
|
662 | 754 | finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes |
|
663 | 755 | migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data) |
|
664 | 756 | finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes |
|
665 | 757 | migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data) |
|
666 | 758 | finished migrating 1 changelog revisions; change in size: 0 bytes |
|
667 | 759 | finished migrating 3 total revisions; total change in store size: 0 bytes |
|
668 | 760 | copying .XX_special_filename |
|
669 | 761 | copying phaseroots |
|
670 | 762 | data fully migrated to temporary repository |
|
671 | 763 | marking source repository as being upgraded; clients will be unable to read from repository |
|
672 | 764 | starting in-place swap of repository data |
|
673 | 765 | replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob) |
|
674 | 766 | replacing store... |
|
675 | 767 | store replacement complete; repository was inconsistent for *s (glob) |
|
676 | 768 | finalizing requirements file and making repository readable again |
|
677 | 769 | removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob) |
|
678 | 770 | copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob) |
|
679 | 771 | the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified |
|
680 | 772 | $ hg debugupgraderepo --run --optimize redeltafulladd |
|
681 | 773 | upgrade will perform the following actions: |
|
682 | 774 | |
|
683 | 775 | requirements |
|
684 | 776 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store |
|
685 | 777 | |
|
686 | 778 | re-delta-fulladd |
|
687 | 779 | each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it |
|
688 | 780 | |
|
689 | 781 | beginning upgrade... |
|
690 | 782 | repository locked and read-only |
|
691 | 783 | creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob) |
|
692 | 784 | (it is safe to interrupt this process any time before data migration completes) |
|
693 | 785 | migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog) |
|
694 | 786 | migrating 301 bytes in store; 107 bytes tracked data |
|
695 | 787 | migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data) |
|
696 | 788 | finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes |
|
697 | 789 | migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data) |
|
698 | 790 | finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes |
|
699 | 791 | migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data) |
|
700 | 792 | finished migrating 1 changelog revisions; change in size: 0 bytes |
|
701 | 793 | finished migrating 3 total revisions; total change in store size: 0 bytes |
|
702 | 794 | copying .XX_special_filename |
|
703 | 795 | copying phaseroots |
|
704 | 796 | data fully migrated to temporary repository |
|
705 | 797 | marking source repository as being upgraded; clients will be unable to read from repository |
|
706 | 798 | starting in-place swap of repository data |
|
707 | 799 | replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob) |
|
708 | 800 | replacing store... |
|
709 | 801 | store replacement complete; repository was inconsistent for *s (glob) |
|
710 | 802 | finalizing requirements file and making repository readable again |
|
711 | 803 | removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob) |
|
712 | 804 | copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob) |
|
713 | 805 | the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified |
|
714 | 806 | |
|
715 | 807 | fncache is valid after upgrade |
|
716 | 808 | |
|
717 | 809 | $ hg debugrebuildfncache |
|
718 | 810 | fncache already up to date |
|
719 | 811 | |
|
720 | 812 | $ cd .. |
|
721 | 813 | |
|
722 | 814 | Check upgrading a large file repository |
|
723 | 815 | --------------------------------------- |
|
724 | 816 | |
|
725 | 817 | $ hg init largefilesrepo |
|
726 | 818 | $ cat << EOF >> largefilesrepo/.hg/hgrc |
|
727 | 819 | > [extensions] |
|
728 | 820 | > largefiles = |
|
729 | 821 | > EOF |
|
730 | 822 | |
|
731 | 823 | $ cd largefilesrepo |
|
732 | 824 | $ touch foo |
|
733 | 825 | $ hg add --large foo |
|
734 | 826 | $ hg -q commit -m initial |
|
735 | 827 | $ cat .hg/requires |
|
736 | 828 | dotencode |
|
737 | 829 | fncache |
|
738 | 830 | generaldelta |
|
739 | 831 | largefiles |
|
740 | 832 | revlogv1 |
|
741 | 833 | sparserevlog |
|
742 | 834 | store |
|
743 | 835 | |
|
744 | 836 | $ hg debugupgraderepo --run |
|
745 | 837 | upgrade will perform the following actions: |
|
746 | 838 | |
|
747 | 839 | requirements |
|
748 | 840 | preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store |
|
749 | 841 | |
|
750 | 842 | beginning upgrade... |
|
751 | 843 | repository locked and read-only |
|
752 | 844 | creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob) |
|
753 | 845 | (it is safe to interrupt this process any time before data migration completes) |
|
754 | 846 | migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog) |
|
755 | 847 | migrating 355 bytes in store; 160 bytes tracked data |
|
756 | 848 | migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data) |
|
757 | 849 | finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes |
|
758 | 850 | migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data) |
|
759 | 851 | finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes |
|
760 | 852 | migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data) |
|
761 | 853 | finished migrating 1 changelog revisions; change in size: 0 bytes |
|
762 | 854 | finished migrating 3 total revisions; total change in store size: 0 bytes |
|
763 | 855 | copying phaseroots |
|
764 | 856 | data fully migrated to temporary repository |
|
765 | 857 | marking source repository as being upgraded; clients will be unable to read from repository |
|
766 | 858 | starting in-place swap of repository data |
|
767 | 859 | replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob) |
|
768 | 860 | replacing store... |
|
769 | 861 | store replacement complete; repository was inconsistent for *s (glob) |
|
770 | 862 | finalizing requirements file and making repository readable again |
|
771 | 863 | removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob) |
|
772 | 864 | copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob) |
|
773 | 865 | the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified |
|
774 | 866 | $ cat .hg/requires |
|
775 | 867 | dotencode |
|
776 | 868 | fncache |
|
777 | 869 | generaldelta |
|
778 | 870 | largefiles |
|
779 | 871 | revlogv1 |
|
780 | 872 | sparserevlog |
|
781 | 873 | store |
|
782 | 874 | |
|
783 | 875 | $ cat << EOF >> .hg/hgrc |
|
784 | 876 | > [extensions] |
|
785 | 877 | > lfs = |
|
786 | 878 | > [lfs] |
|
787 | 879 | > threshold = 10 |
|
788 | 880 | > EOF |
|
789 | 881 | $ echo '123456789012345' > lfs.bin |
|
790 | 882 | $ hg ci -Am 'lfs.bin' |
|
791 | 883 | adding lfs.bin |
|
792 | 884 | $ grep lfs .hg/requires |
|
793 | 885 | lfs |
|
794 | 886 | $ find .hg/store/lfs -type f |
|
795 | 887 | .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f |
|
796 | 888 | |
|
797 | 889 | $ hg debugupgraderepo --run |
|
798 | 890 | upgrade will perform the following actions: |
|
799 | 891 | |
|
800 | 892 | requirements |
|
801 | 893 | preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store |
|
802 | 894 | |
|
803 | 895 | beginning upgrade... |
|
804 | 896 | repository locked and read-only |
|
805 | 897 | creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob) |
|
806 | 898 | (it is safe to interrupt this process any time before data migration completes) |
|
807 | 899 | migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog) |
|
808 | 900 | migrating 801 bytes in store; 467 bytes tracked data |
|
809 | 901 | migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data) |
|
810 | 902 | finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes |
|
811 | 903 | migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data) |
|
812 | 904 | finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes |
|
813 | 905 | migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data) |
|
814 | 906 | finished migrating 2 changelog revisions; change in size: 0 bytes |
|
815 | 907 | finished migrating 6 total revisions; total change in store size: 0 bytes |
|
816 | 908 | copying phaseroots |
|
817 | 909 | copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f |
|
818 | 910 | data fully migrated to temporary repository |
|
819 | 911 | marking source repository as being upgraded; clients will be unable to read from repository |
|
820 | 912 | starting in-place swap of repository data |
|
821 | 913 | replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob) |
|
822 | 914 | replacing store... |
|
823 | 915 | store replacement complete; repository was inconsistent for *s (glob) |
|
824 | 916 | finalizing requirements file and making repository readable again |
|
825 | 917 | removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob) |
|
826 | 918 | copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob) |
|
827 | 919 | the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified |
|
828 | 920 | |
|
829 | 921 | $ grep lfs .hg/requires |
|
830 | 922 | lfs |
|
831 | 923 | $ find .hg/store/lfs -type f |
|
832 | 924 | .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f |
|
833 | 925 | $ hg verify |
|
834 | 926 | checking changesets |
|
835 | 927 | checking manifests |
|
836 | 928 | crosschecking files in changesets and manifests |
|
837 | 929 | checking files |
|
838 | 930 | checked 2 changesets with 2 changes to 2 files |
|
839 | 931 | $ hg debugdata lfs.bin 0 |
|
840 | 932 | version https://git-lfs.github.com/spec/v1 |
|
841 | 933 | oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f |
|
842 | 934 | size 16 |
|
843 | 935 | x-is-binary 0 |
|
844 | 936 | |
|
845 | 937 | $ cd .. |
|
846 | 938 | |
|
847 | 939 | repository config is taken in account |
|
848 | 940 | ------------------------------------- |
|
849 | 941 | |
|
850 | 942 | $ cat << EOF >> $HGRCPATH |
|
851 | 943 | > [format] |
|
852 | 944 | > maxchainlen = 1 |
|
853 | 945 | > EOF |
|
854 | 946 | |
|
855 | 947 | $ hg init localconfig |
|
856 | 948 | $ cd localconfig |
|
857 | 949 | $ cat << EOF > file |
|
858 | 950 | > some content |
|
859 | 951 | > with some length |
|
860 | 952 | > to make sure we get a delta |
|
861 | 953 | > after changes |
|
862 | 954 | > very long |
|
863 | 955 | > very long |
|
864 | 956 | > very long |
|
865 | 957 | > very long |
|
866 | 958 | > very long |
|
867 | 959 | > very long |
|
868 | 960 | > very long |
|
869 | 961 | > very long |
|
870 | 962 | > very long |
|
871 | 963 | > very long |
|
872 | 964 | > very long |
|
873 | 965 | > EOF |
|
874 | 966 | $ hg -q commit -A -m A |
|
875 | 967 | $ echo "new line" >> file |
|
876 | 968 | $ hg -q commit -m B |
|
877 | 969 | $ echo "new line" >> file |
|
878 | 970 | $ hg -q commit -m C |
|
879 | 971 | |
|
880 | 972 | $ cat << EOF >> .hg/hgrc |
|
881 | 973 | > [format] |
|
882 | 974 | > maxchainlen = 9001 |
|
883 | 975 | > EOF |
|
884 | 976 | $ hg config format |
|
885 | 977 | format.maxchainlen=9001 |
|
886 | 978 | $ hg debugdeltachain file |
|
887 | 979 | rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks |
|
888 | 980 | 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1 |
|
889 | 981 | 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1 |
|
890 | 982 | 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1 |
|
891 | 983 | |
|
892 | 984 | $ hg debugupgraderepo --run --optimize redeltaall |
|
893 | 985 | upgrade will perform the following actions: |
|
894 | 986 | |
|
895 | 987 | requirements |
|
896 | 988 | preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store |
|
897 | 989 | |
|
898 | 990 | re-delta-all |
|
899 | 991 | deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time |
|
900 | 992 | |
|
901 | 993 | beginning upgrade... |
|
902 | 994 | repository locked and read-only |
|
903 | 995 | creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob) |
|
904 | 996 | (it is safe to interrupt this process any time before data migration completes) |
|
905 | 997 | migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog) |
|
906 | 998 | migrating 1019 bytes in store; 882 bytes tracked data |
|
907 | 999 | migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data) |
|
908 | 1000 | finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes |
|
909 | 1001 | migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data) |
|
910 | 1002 | finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes |
|
911 | 1003 | migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data) |
|
912 | 1004 | finished migrating 3 changelog revisions; change in size: 0 bytes |
|
913 | 1005 | finished migrating 9 total revisions; total change in store size: -9 bytes |
|
914 | 1006 | copying phaseroots |
|
915 | 1007 | data fully migrated to temporary repository |
|
916 | 1008 | marking source repository as being upgraded; clients will be unable to read from repository |
|
917 | 1009 | starting in-place swap of repository data |
|
918 | 1010 | replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob) |
|
919 | 1011 | replacing store... |
|
920 | 1012 | store replacement complete; repository was inconsistent for *s (glob) |
|
921 | 1013 | finalizing requirements file and making repository readable again |
|
922 | 1014 | removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob) |
|
923 | 1015 | copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob) |
|
924 | 1016 | the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified |
|
925 | 1017 | $ hg debugdeltachain file |
|
926 | 1018 | rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks |
|
927 | 1019 | 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1 |
|
928 | 1020 | 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1 |
|
929 | 1021 | 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1 |
|
930 | 1022 | $ cd .. |
|
931 | 1023 | |
|
932 | 1024 | $ cat << EOF >> $HGRCPATH |
|
933 | 1025 | > [format] |
|
934 | 1026 | > maxchainlen = 9001 |
|
935 | 1027 | > EOF |
|
936 | 1028 | |
|
937 | 1029 | Check upgrading a sparse-revlog repository |
|
938 | 1030 | --------------------------------------- |
|
939 | 1031 | |
|
940 | 1032 | $ hg init sparserevlogrepo --config format.sparse-revlog=no |
|
941 | 1033 | $ cd sparserevlogrepo |
|
942 | 1034 | $ touch foo |
|
943 | 1035 | $ hg add foo |
|
944 | 1036 | $ hg -q commit -m "foo" |
|
945 | 1037 | $ cat .hg/requires |
|
946 | 1038 | dotencode |
|
947 | 1039 | fncache |
|
948 | 1040 | generaldelta |
|
949 | 1041 | revlogv1 |
|
950 | 1042 | store |
|
951 | 1043 | |
|
952 | 1044 | Check that we can add the sparse-revlog format requirement |
|
953 | 1045 | $ hg --config format.sparse-revlog=yes debugupgraderepo --run >/dev/null |
|
954 | 1046 | copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob) |
|
955 | 1047 | the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified |
|
956 | 1048 | $ cat .hg/requires |
|
957 | 1049 | dotencode |
|
958 | 1050 | fncache |
|
959 | 1051 | generaldelta |
|
960 | 1052 | revlogv1 |
|
961 | 1053 | sparserevlog |
|
962 | 1054 | store |
|
963 | 1055 | |
|
964 | 1056 | Check that we can remove the sparse-revlog format requirement |
|
965 | 1057 | $ hg --config format.sparse-revlog=no debugupgraderepo --run >/dev/null |
|
966 | 1058 | copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob) |
|
967 | 1059 | the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified |
|
968 | 1060 | $ cat .hg/requires |
|
969 | 1061 | dotencode |
|
970 | 1062 | fncache |
|
971 | 1063 | generaldelta |
|
972 | 1064 | revlogv1 |
|
973 | 1065 | store |
|
974 | 1066 | |
|
975 | 1067 | #if zstd |
|
976 | 1068 | |
|
977 | 1069 | Check upgrading to a zstd revlog |
|
978 | 1070 | -------------------------------- |
|
979 | 1071 | |
|
980 | 1072 | upgrade |
|
981 | 1073 | |
|
982 | 1074 | $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup >/dev/null |
|
983 | 1075 | $ hg debugformat -v |
|
984 | 1076 | format-variant repo config default |
|
985 | 1077 | fncache: yes yes yes |
|
986 | 1078 | dotencode: yes yes yes |
|
987 | 1079 | generaldelta: yes yes yes |
|
988 | 1080 | sparserevlog: yes yes yes |
|
989 | 1081 | plain-cl-delta: yes yes yes |
|
990 | 1082 | compression: zstd zlib zlib |
|
991 | 1083 | compression-level: default default default |
|
992 | 1084 | $ cat .hg/requires |
|
993 | 1085 | dotencode |
|
994 | 1086 | fncache |
|
995 | 1087 | generaldelta |
|
996 | 1088 | revlog-compression-zstd |
|
997 | 1089 | revlogv1 |
|
998 | 1090 | sparserevlog |
|
999 | 1091 | store |
|
1000 | 1092 | |
|
1001 | 1093 | downgrade |
|
1002 | 1094 | |
|
1003 | 1095 | $ hg debugupgraderepo --run --no-backup > /dev/null |
|
1004 | 1096 | $ hg debugformat -v |
|
1005 | 1097 | format-variant repo config default |
|
1006 | 1098 | fncache: yes yes yes |
|
1007 | 1099 | dotencode: yes yes yes |
|
1008 | 1100 | generaldelta: yes yes yes |
|
1009 | 1101 | sparserevlog: yes yes yes |
|
1010 | 1102 | plain-cl-delta: yes yes yes |
|
1011 | 1103 | compression: zlib zlib zlib |
|
1012 | 1104 | compression-level: default default default |
|
1013 | 1105 | $ cat .hg/requires |
|
1014 | 1106 | dotencode |
|
1015 | 1107 | fncache |
|
1016 | 1108 | generaldelta |
|
1017 | 1109 | revlogv1 |
|
1018 | 1110 | sparserevlog |
|
1019 | 1111 | store |
|
1020 | 1112 | |
|
1021 | 1113 | upgrade from hgrc |
|
1022 | 1114 | |
|
1023 | 1115 | $ cat >> .hg/hgrc << EOF |
|
1024 | 1116 | > [format] |
|
1025 | 1117 | > revlog-compression=zstd |
|
1026 | 1118 | > EOF |
|
1027 | 1119 | $ hg debugupgraderepo --run --no-backup > /dev/null |
|
1028 | 1120 | $ hg debugformat -v |
|
1029 | 1121 | format-variant repo config default |
|
1030 | 1122 | fncache: yes yes yes |
|
1031 | 1123 | dotencode: yes yes yes |
|
1032 | 1124 | generaldelta: yes yes yes |
|
1033 | 1125 | sparserevlog: yes yes yes |
|
1034 | 1126 | plain-cl-delta: yes yes yes |
|
1035 | 1127 | compression: zstd zstd zlib |
|
1036 | 1128 | compression-level: default default default |
|
1037 | 1129 | $ cat .hg/requires |
|
1038 | 1130 | dotencode |
|
1039 | 1131 | fncache |
|
1040 | 1132 | generaldelta |
|
1041 | 1133 | revlog-compression-zstd |
|
1042 | 1134 | revlogv1 |
|
1043 | 1135 | sparserevlog |
|
1044 | 1136 | store |
|
1045 | 1137 | |
|
1046 | 1138 | $ cd .. |
|
1047 | 1139 | |
|
1048 | 1140 | #endif |
General Comments 0
You need to be logged in to leave comments.
Login now