##// END OF EJS Templates
# Review-page header (scraper residue, not part of the source):
# changeset r42130:d1218230 on branch 'default' by marmoute —
# "manifestcache: protect write with `wlock` instead of `lock`"
# Diff context: @@ -1,3429 +1,3429 b''
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import codecs
11 11 import collections
12 12 import difflib
13 13 import errno
14 14 import operator
15 15 import os
16 16 import random
17 17 import re
18 18 import socket
19 19 import ssl
20 20 import stat
21 21 import string
22 22 import subprocess
23 23 import sys
24 24 import time
25 25
26 26 from .i18n import _
27 27 from .node import (
28 28 bin,
29 29 hex,
30 30 nullhex,
31 31 nullid,
32 32 nullrev,
33 33 short,
34 34 )
35 35 from . import (
36 36 bundle2,
37 37 changegroup,
38 38 cmdutil,
39 39 color,
40 40 context,
41 41 copies,
42 42 dagparser,
43 43 encoding,
44 44 error,
45 45 exchange,
46 46 extensions,
47 47 filemerge,
48 48 filesetlang,
49 49 formatter,
50 50 hg,
51 51 httppeer,
52 52 localrepo,
53 53 lock as lockmod,
54 54 logcmdutil,
55 55 merge as mergemod,
56 56 obsolete,
57 57 obsutil,
58 58 phases,
59 59 policy,
60 60 pvec,
61 61 pycompat,
62 62 registrar,
63 63 repair,
64 64 revlog,
65 65 revset,
66 66 revsetlang,
67 67 scmutil,
68 68 setdiscovery,
69 69 simplemerge,
70 70 sshpeer,
71 71 sslutil,
72 72 streamclone,
73 73 templater,
74 74 treediscovery,
75 75 upgrade,
76 76 url as urlmod,
77 77 util,
78 78 vfs as vfsmod,
79 79 wireprotoframing,
80 80 wireprotoserver,
81 81 wireprotov2peer,
82 82 )
83 83 from .utils import (
84 84 cborutil,
85 85 dateutil,
86 86 procutil,
87 87 stringutil,
88 88 )
89 89
90 90 from .revlogutils import (
91 91 deltas as deltautil
92 92 )
93 93
# convenience alias: release a sequence of locks (ignoring None entries)
release = lockmod.release

# registration table populated by the @command decorators below; the core
# dispatch machinery collects it from this module
command = registrar.command()
97 97
@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    nargs = len(args)
    if nargs == 3:
        # explicit index file given: open it as a standalone revlog
        index, rev1, rev2 = args
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = rlog.lookup
    elif nargs == 2:
        # no index file: fall back to the current repository's changelog
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        rlog = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    ancnode = rlog.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (rlog.rev(ancnode), hex(ancnode)))
116 116
@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    # openpath handles both local paths and URLs
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.apply(repo)
123 123
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

     - "+n" is a linear run of n nodes based on the current default parent
     - "." is a single node based on the current default parent
     - "$" resets the default parent to null (implied at the start);
           otherwise the default parent is always the last node created
     - "<p" sets the default parent to the backref p
     - "*p" is a fork at parent p, which is a backref
     - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
     - "/p2" is a merge of the preceding node and p2
     - ":tag" defines a local tag for the preceding node
     - "@branch" sets the named branch for subsequent nodes
     - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

     - a number n, which references the node curr-n, where curr is the current
       node, or
     - the name of a local tag you placed earlier using ":tag", or
     - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # first pass over the DAG text: determine number of revs, so the
    # progress bar and the mergeable-file content can be sized up front
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = ['%d' % i
                              for i in pycompat.xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []
    progress = ui.makeprogress(_('building'), unit=_('revisions'),
                               total=total)
    # create every revision inside a single transaction, holding both the
    # working-copy lock and the store lock for its duration
    with progress, repo.wlock(), repo.lock(), repo.transaction("builddag"):
        at = -1                 # rev id of the last node created (-1 = none)
        atbranch = 'default'    # branch applied to subsequently created nodes
        nodeids = []            # map: dag id -> binary node, in creation order
        id = 0
        progress.update(id)
        # second pass: actually commit a changeset per 'n' element
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    # "mf" accumulates per-rev markers; merges are resolved
                    # with a 3-way text merge against the ancestor
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    # "of" is rewritten wholesale in every revision
                    fn = "of"
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id

                if new_file:
                    # a fresh "nf<id>" per revision; merges also carry over
                    # the second parent's nf* files
                    fn = "nf%i" % id
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(repo, cx, path,
                                                  filecontent[path])
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                # local tag for the preceding node; written out at the end
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                ui.note(('branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write("localtags", "".join(tags))
271 271
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """Print the contents of a changegroup.

    With all=True, every delta of each section (changelog, manifest, filelogs)
    is listed with its full metadata; otherwise only the changelog nodes are
    printed. 'indent' prefixes each output line (used when nested in bundle2
    part output).
    """
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            # dump all deltas of the current section under a heading
            ui.write("\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write("%s%s %s %s %s %s %d\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))

        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        # filelog sections repeat until an empty header ({}) is returned
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        chunkdata = gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write("%s%s\n" % (indent_string, hex(node)))
300 300
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = ' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # report but do not abort: the rest of the bundle can still be shown
        msg = "%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = "%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        # reuse the 'debugobsolete' formatter so output matches that command
        fm = ui.formatter('debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()
323 323
def _debugphaseheads(ui, data, indent=0):
    """decode 'data' as binary phase heads and print one head per line"""
    pad = ' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        phasename = phases.phasenames[phase]
        for node in headsbyphase[phase]:
            ui.write(pad)
            ui.write('%s %s\n' % (hex(node), phasename))
332 332
def _quasirepr(thing):
    """repr()-like rendering of 'thing' with deterministic dict ordering"""
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        pairs = (b'%s: %s' % (key, thing[key]) for key in sorted(thing))
        return '{%s}' % b', '.join(pairs)
    return pycompat.bytestr(repr(thing))
338 338
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
    # optional --part-type filter (note: key is 'part_type' after CLI parsing)
    parttypes = opts.get(r'part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = '%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        # known part payloads get a decoded, indented dump unless --quiet
        if part.type == 'changegroup':
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == 'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == 'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)
361 361
@command('debugbundle',
        [('a', 'all', None, _('show all details')),
        ('', 'part-type', [], _('show only the named part type')),
        ('', 'spec', None, _('print the bundlespec of the bundle'))],
        _('FILE'),
        norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            # --spec: only print the bundlespec, do not decode the contents
            spec = exchange.getbundlespec(ui, f)
            ui.write('%s\n' % spec)
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        # dispatch on bundle format: bundle2 vs plain changegroup
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
380 380
@command('debugcapabilities',
        [], _('PATH'),
        norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    caps = peer.capabilities()
    ui.write(('Main capabilities:\n'))
    for c in sorted(caps):
        # NOTE(review): leading padding inside these literals may have been
        # collapsed in transit — confirm against upstream before relying on
        # exact output alignment
        ui.write((' %s\n') % c)
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.write(('Bundle2 capabilities:\n'))
        for key, values in sorted(b2caps.iteritems()):
            ui.write((' %s\n') % key)
            for v in values:
                ui.write((' %s\n') % v)
399 399
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate

    Cross-checks every dirstate entry against the manifests of the two
    dirstate parents and warns about inconsistencies; aborts if any were
    found.
    """
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        # 'n'ormal/'r'emoved entries must exist in the first parent manifest
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        # 'a'dded entries must not already exist in the first parent
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        # 'm'erged entries must come from at least one parent
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # use a name that does not shadow the 'error' module: the original
        # code assigned the message to a local called 'error', which made
        # the subsequent error.Abort lookup fail with an AttributeError
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
427 427
@command('debugcolor',
        [('', 'style', None, _('show all configured styles'))],
        'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.write(('color mode: %s\n') % stringutil.pprint(ui._colormode))
    # --style lists configured style labels; default lists raw colors
    if not opts.get(r'style'):
        return _debugdisplaycolor(ui)
    return _debugdisplaystyle(ui)
438 438
def _debugdisplaycolor(ui):
    """print every color/effect name, each rendered in its own style"""
    # work on a copy so the caller's ui style table is not clobbered
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        # in terminfo mode, also expose the user-configured color/terminfo
        # entries, keyed without their section prefix
        for k, v in ui.configitems('color'):
            if k.startswith('color.'):
                ui._styles[k] = k[6:]
            elif k.startswith('terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_('available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    items = sorted(ui._styles.items(),
                   key=lambda i: ('_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(('%s\n') % colorname, label=label)
456 456
def _debugdisplaystyle(ui):
    """print every configured style label and its effects, column-aligned"""
    ui.write(_('available style:\n'))
    if not ui._styles:
        return
    # pad so the effect lists line up under the longest label
    width = max(len(labelname) for labelname in ui._styles)
    for labelname, effects in sorted(ui._styles.items()):
        ui.write('%s' % labelname, label=labelname)
        if effects:
            ui.write(': ')
            ui.write(' ' * (max(0, width - len(labelname))))
            ui.write(', '.join(ui.label(e, e) for e in effects.split()))
        ui.write('\n')
470 470
@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    # print the requirements a reader needs in order to apply this bundle
    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
488 488
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
    ('b', 'branches', None, _('annotate with branch names')),
    ('', 'dots', None, _('use dots for runs')),
    ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get(r'spaces')
    dots = opts.get(r'dots')
    if file_:
        # standalone index file: emit its DAG; listed revs become labels
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            # yield ('n', (rev, parents)) for nodes, ('l', (rev, label))
            # for requested rev labels
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # repository changelog, optionally annotated with tags/branches
        cl = repo.changelog
        tags = opts.get(r'tags')
        branches = opts.get(r'branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    # emit an ('a', branch) event whenever the branch changes
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
    ui.write("\n")
551 551
@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    # with -c/-m/--dir the single positional argument is the revision,
    # not a file
    storageselected = (opts.get('changelog') or opts.get('manifest')
                       or opts.get('dir'))
    if storageselected:
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    store = cmdutil.openstorage(repo, 'debugdata', file_, opts)
    try:
        ui.write(store.revision(store.lookup(rev), raw=True))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)
567 567
@command('debugdate',
        [('e', 'extended', None, _('try extended date formats'))],
        _('[-e] DATE [RANGE]'),
        norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # -e/--extended also tries the more permissive date formats
    if opts[r"extended"]:
        parsed = dateutil.parsedate(date, util.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    ui.write(("internal: %d %d\n") % parsed)
    ui.write(("standard: %s\n") % dateutil.datestr(parsed))
    if range:
        matchfn = dateutil.matchdate(range)
        ui.write(("match: %s\n") % matchfn(parsed[0]))
583 583
@command('debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
                   (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        # index entry fields used below: e[1] compressed size,
        # e[2] uncompressed size, e[3] delta base rev, e[5]/e[6] parent revs
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # classify the delta by what its base revision is
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            # without generaldelta the base is always the previous rev
            # (or the rev itself for full snapshots)
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    # NOTE(review): column padding inside these header literals may have
    # been collapsed in transit — confirm alignment against upstream
    fm.plain(' rev chain# chainlen prev delta '
             'size rawsize chainsize ratio lindist extradist '
             'extraratio')
    if withsparseread:
        fm.plain(' readsize largestblk rddensity srchunks')
    fm.plain('\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        # number chains sequentially by first-seen base revision
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        # guard the ratios against division by zero
        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            # measure the actual on-disk blocks a sparse read would touch
            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write('readsize largestblock readdensity srchunks',
                     ' %10d %10d %9.5f %8d',
                     readsize, largestblock, readdensity, srchunks,
                     readsize=readsize, largestblock=largestblock,
                     readdensity=readdensity, srchunks=srchunks)

        fm.plain('\n')

    fm.end()
735 735
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime (DEPRECATED)')),
    ('', 'dates', True, _('display the saved mtime')),
    ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    # --nodates is the deprecated spelling of --no-dates
    nodates = not opts[r'dates']
    if opts.get(r'nodates') is not None:
        nodates = True
    datesort = opts.get(r'datesort')

    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    # dirstate map entries are tuples: (state, mode, size, mtime)
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            # NOTE(review): these literals were likely space-padded to align
            # with the strftime output — confirm padding against upstream
            timestr = 'unset '
        elif nodates:
            timestr = 'set '
        else:
            timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
            timestr = encoding.strtolocal(timestr)
        # 0o20000 is the symlink bit in the stored mode
        if ent[1] & 0o20000:
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
769 769
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
    ('', 'nonheads', None,
     _('use old-style discovery with non-heads included')),
    ('', 'rev', [], 'restrict discovery to this set of revs'),
    ] + cmdutil.remoteopts,
    _('[--rev REV] [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(pushedrevs, remoteheads, remote=remote):
        if opts.get('old'):
            # legacy (pre-setdiscovery) tree-walking protocol
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))

            # reduce the common set to its heads for comparable output
            clnode = repo.changelog.node
            common = repo.revs('heads(::%ln)', common)
            common = {clnode(r) for r in common}
        else:
            # modern sampling-based discovery, optionally restricted by --rev
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote,
                                                            ancestorsof=nodes)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts['rev']
    doit(localrevs, remoterevs)
822 822
823 823 _chunksize = 4 << 10
824 824
@command('debugdownload',
    [
        ('o', 'output', '', _('path')),
    ],
    optionalrepo=True)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config

    The body is copied in _chunksize pieces to 'output' when given,
    otherwise to the ui.
    """
    fh = urlmod.open(ui, url, output)

    dest = ui
    try:
        if output:
            dest = open(output, "wb", _chunksize)
        try:
            data = fh.read(_chunksize)
            while data:
                dest.write(data)
                data = fh.read(_chunksize)
        finally:
            if output:
                dest.close()
    finally:
        # the original leaked the response handle; always release it
        fh.close()
846 846
@command('debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = [] # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            # annotate the name with compatibility info for this hg version
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        # NOTE(review): the leading spaces in the literals below may have
        # been collapsed in transit — confirm indentation against upstream
        fm.condwrite(ui.verbose and extsource, 'source',
                     _(' location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _(' tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _(' bug reporting: %s\n'), extbuglink or "")

    fm.end()
892 892
@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')),
     ('', 'all-files', False,
      _('test files from all revisions and working directory')),
     ('s', 'show-matcher', None,
      _('print internal representation of matcher')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME'))],
    _('[-r REV] [--all-files] [OPTION]... FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset
    fileset.symbols # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    # Pipeline of tree transformations; each stage's output feeds the next.
    stages = [
        ('parsed', pycompat.identity),
        ('analyzed', filesetlang.analyze),
        ('optimized', filesetlang.optimize),
    ]
    stagenames = set(n for n, f in stages)

    # Names of the stages whose intermediate tree should be printed.
    showalways = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    # Run the pipeline, dumping the tree after each requested stage.
    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(filesetlang.prettyformat(tree), "\n")

    # Collect the candidate file names the matcher will be tested against:
    # all revisions (--all-files), the working directory, or a single context.
    files = set()
    if opts['all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts['all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(repo.dirstate.walk(scmutil.matchall(repo),
                                        subrepos=list(wctx.substate),
                                        unknown=True, ignored=True))
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(expr)
    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write("%s\n" % f)
959 959
@command('debugformat',
         [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    # Column width: the longest variant name, but at least wide enough for
    # the 'format-variant' header itself.
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len('format-variant'), maxvariantlength)

    def makeformatname(name):
        # Build a '%s:' format padded so all columns line up.
        return '%s:' + (' ' * (maxvariantlength - len(name)))

    fm = ui.formatter('debugformat', opts)
    if fm.isplain():
        def formatvalue(value):
            # Strings pass through; booleans become yes/no for plain output.
            if util.safehasattr(value, 'startswith'):
                return value
            if value:
                return 'yes'
            else:
                return 'no'
    else:
        # Structured formatters (json, template...) get the raw value.
        formatvalue = pycompat.identity

    # Header row; config/default columns only appear with --verbose.
    fm.plain('format-variant')
    fm.plain(' ' * (maxvariantlength - len('format-variant')))
    fm.plain(' repo')
    if ui.verbose:
        fm.plain(' config default')
    fm.plain('\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # Pick labels so mismatches between repo, config and default can be
        # colorized differently.
        if repovalue != configvalue:
            namelabel = 'formatvariant.name.mismatchconfig'
            repolabel = 'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = 'formatvariant.name.mismatchdefault'
            repolabel = 'formatvariant.repo.mismatchdefault'
        else:
            namelabel = 'formatvariant.name.uptodate'
            repolabel = 'formatvariant.repo.uptodate'

        fm.write('name', makeformatname(fv.name), fv.name,
                 label=namelabel)
        fm.write('repo', ' %3s', formatvalue(repovalue),
                 label=repolabel)
        if fv.default != configvalue:
            configlabel = 'formatvariant.config.special'
        else:
            configlabel = 'formatvariant.config.default'
        fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
                     label=configlabel)
        fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
                     label='formatvariant.default')
        fm.plain('\n')
    fm.end()
1021 1021
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    # Render a boolean probe result in the traditional yes/no form.
    def yesno(flag):
        return 'yes' if flag else 'no'

    ui.write(('path: %s\n') % path)
    mountpoint = util.getfsmountpoint(path) or '(unknown)'
    ui.write(('mounted on: %s\n') % mountpoint)
    ui.write(('exec: %s\n') % yesno(util.checkexec(path)))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % yesno(util.checklink(path)))
    ui.write(('hardlink: %s\n') % yesno(util.checknlink(path)))
    # Case sensitivity is probed with a throwaway file; if the probe file
    # cannot be created (e.g. unwritable path) we report '(unknown)'.
    sensitivity = '(unknown)'
    try:
        with pycompat.namedtempfile(prefix='.debugfsinfo', dir=path) as f:
            sensitivity = yesno(util.fscasesensitive(f.name))
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % sensitivity)
1038 1038
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
     ('C', 'common', [], _('id of common node'), _('ID')),
     ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    # Translate the hex IDs from the command line into the binary nodes
    # that getbundle() expects.
    # TODO: get desired bundlecaps from command line.
    kwargs = {r'bundlecaps': None}
    if common:
        kwargs[r'common'] = [bin(s) for s in common]
    if head:
        kwargs[r'heads'] = [bin(s) for s in head]
    bundle = peer.getbundle('debug', **kwargs)

    # Map the user-facing compression spec to an on-disk bundle type.
    spec2type = {
        'none': 'HG10UN',
        'bzip2': 'HG10BZ',
        'gzip': 'HG10GZ',
        'bundle2': 'HG20',
    }
    bundletype = spec2type.get(opts.get('type', 'bzip2').lower())
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1073 1073
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            # 'ignored' is the path that actually matched an ignore rule:
            # either the file itself or one of its parent directories.
            ignored = None
            ignoredata = None
            if nf != '.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    # The file itself is not ignored; check whether any
                    # containing directory is.
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(_("%s is ignored because of "
                               "containing folder %s\n")
                             % (uipathfn(f), ignored))
                # Report which ignore file and line produced the match.
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % uipathfn(f))
1116 1116
@command('debugindex', cmdutil.debugrevlogopts + cmdutil.formatteropts,
         _('-c|-m|FILE'))
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindex', file_, opts)

    # --debug prints full 40-char hashes, otherwise the short form.
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # Column width for node ids; measure the first entry, default to 12
    # (the short-hash length) for an empty store.
    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break

    fm = ui.formatter('debugindex', opts)
    fm.plain(b'   rev linkrev %s %s p2\n' % (
        b'nodeid'.ljust(idlen),
        b'p1'.ljust(idlen)))

    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', '%7d ', store.linkrev(rev))
        fm.write(b'node', '%s ', shortfn(node))
        fm.write(b'p1', '%s ', shortfn(parents[0]))
        fm.write(b'p2', '%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()
1152 1152
@command('debugindexdot', cmdutil.debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    # One "parent -> child" edge per parent; the second (null) parent of
    # non-merge revisions is omitted.
    for rev in store:
        node = store.node(rev)
        p1, p2 = store.parents(node)
        ui.write("\t%d -> %d\n" % (store.rev(p1), rev))
        if p2 != nullid:
            ui.write("\t%d -> %d\n" % (store.rev(p2), rev))
    ui.write("}\n")
1167 1167
@command('debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # Exercise the index once before collecting statistics (order kept from
    # the previous implementation).
    repo.changelog.shortest(nullid, 1)
    index = repo.changelog.index
    # Only the native (C) index implementation exposes stats().
    if not util.safehasattr(index, 'stats'):
        raise error.Abort(_('debugindexstats only works with native code'))
    stats = index.stats()
    for name in sorted(stats):
        ui.write('%s: %d\n' % (name, stats[name]))
1177 1177
@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    # Number of failed checks; doubles as the command's return value.
    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable)
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    # TLS/SNI support of the Python build
    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_('  TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_('  SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    if policy.policy in ('c', 'allow'):
        # C extensions are allowed: verify they actually import.
        err = None
        try:
            from .cext import (
                base85,
                bdiff,
                mpatch,
                osutil,
            )
            dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    # compression engines (registered, available, and wire-protocol capable)
    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))
    re2 = 'missing'
    if util._re2:
        re2 = 'available'
    fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    # 'vi' is the built-in fallback, so a missing 'vi' means "no editor
    # configured" rather than "configured editor not found".
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editorbin)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editorbin)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = stringutil.forcebytestr(e)
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems
1344 1344
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('known'):
        raise error.Abort("known() not supported by target repository")
    # Query all ids at once and print one '0'/'1' character per id.
    flags = peer.known([bin(s) for s in ids])
    ui.write("%s\n" % "".join("1" if f else "0" for f in flags))
1358 1358
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # Thin alias: all the completion logic lives in debugnamecomplete.
    debugnamecomplete(ui, repo, *args)
1363 1363
@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)')),
          ('s', 'set-lock', None, _('set the store lock until stopped')),
          ('S', 'set-wlock', None,
           _('set the working state lock until stopped'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # --force-*: remove the lock file directly, then stop.
    if opts.get(r'force_lock'):
        repo.svfs.unlink('lock')
    if opts.get(r'force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
        return 0

    # --set-*: take the requested lock(s) non-blocking (wlock first) and
    # hold them until the user answers the prompt or is interrupted.
    locks = []
    try:
        if opts.get(r'set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_('wlock is already held'))
        if opts.get(r'set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_('lock is already held'))
        if len(locks):
            ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    # Default mode: report the state of both locks. Returns the number of
    # locks currently held.
    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # We took the lock ourselves, so it was free; release it again.
            l.release()
        else:
            # Someone else holds it: describe the holder from the lock file.
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = ('user %s, process %s, host %s'
                                  % (user or b'None', pid, host))
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                # A vanished lock file just means the lock is free.
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held
1460 1460
@command('debugmanifestfulltextcache', [
    ('', 'clear', False, _('clear the cache')),
    ('a', 'add', [], _('add the given manifest nodes to the cache'),
     _('NODE'))
    ], '')
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        # Not every manifest storage implementation carries the cache.
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _("Current revlog implementation doesn't appear to have a "
                    "manifest fulltext cache\n")
            raise error.Abort(msg)

    if opts.get(r'clear'):
        # Writes to the persistent cache are protected by the wlock.
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
        return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(e, hint="Check your manifest node id")
                manifest.read()  # stores revision in cache too
        return

    # Default mode: display the cache contents, most recent first.
    cache = getcache()
    if not len(cache):
        ui.write(_('cache empty\n'))
    else:
        ui.write(
            _('cache contains %d manifest entries, in order of most to '
              'least recent:\n') % (len(cache),))
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24   # 20 bytes nodeid, 4 bytes size
            ui.write(_('id: %s, size %s\n') % (
                hex(nodeid), util.bytecount(size)))
        ondisk = cache._opener.stat('manifestfulltextcache').st_size
        ui.write(
            _('total cache data size %s, on-disk %s\n') % (
                util.bytecount(totalsize), util.bytecount(ondisk))
        )
1516 1516
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        # Display the null hash as the literal string 'null'.
        if h == nullhex:
            return 'null'
        else:
            return h

    def printrecords(version):
        # Dump either the v1 or v2 record list (closed over below).
        ui.write(('* version %d records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                # Per-file merge records: NUL-separated fields; v2 adds the
                # other-side node and flags at positions 7-8.
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write(('  ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write(('  other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                # Extra key/value pairs attached to one file.
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                # Conflict marker labels: local, other, and optionally base.
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write(('  local: %s\n' % labels[0]))
                ui.write(('  other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write(('  base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        # Known record types sort by their position in 'order'; everything
        # else goes after, ordered by payload.
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
        if ui.verbose:
            printrecords(2)
1615 1615
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    candidates = set()
    # The 'branches' namespace is handled separately below because we
    # previously only listed open branches.
    for nsname, ns in repo.names.iteritems():
        if nsname == 'branches':
            continue
        candidates.update(ns.listnames(repo))
    for tag, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            candidates.add(tag)

    # No arguments means "complete the empty prefix", i.e. list everything.
    prefixes = args or ['']
    matches = set()
    for prefix in prefixes:
        for name in candidates:
            if name.startswith(prefix):
                matches.add(name)
    ui.write('\n'.join(sorted(matches)))
    ui.write('\n')
1635 1635
@command('debugobsolete',
        [('', 'flags', 0, _('markers flag')),
         ('', 'record-parents', False,
          _('record parent information for the precursor')),
         ('r', 'rev', [], _('display markers relevant to REV')),
         ('', 'exclusive', False, _('restrict display to markers only '
                                    'relevant to REV')),
         ('', 'index', False, _('display index of the marker')),
         ('', 'delete', [], _('delete markers specified by indices')),
        ] + cmdutil.commitopts2 + cmdutil.formatteropts,
         _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    # --delete mode: remove the markers with the given indices, under the
    # store lock.
    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        # Creation mode: record one marker precursor -> successors inside a
        # transaction, holding the store lock.
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = encoding.fromlocal(opts['user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot used --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata, ui=ui)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') %
                                  pycompat.bytestr(exc))
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # Display mode: list markers, optionally restricted to --rev.
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsutil.getmarkers(repo, nodes=nodes,
                                              exclusive=opts['exclusive']))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            # Indices are positions in the full marker list, so iterate all
            # markers but only display the relevant subset.
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
1752 1752
@command('debugp1copies',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV]'))
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    # One "src -> dst" line per copy recorded against the first parent.
    copymap = ctx.p1copies()
    for destination in copymap:
        source = copymap[destination]
        ui.write('%s -> %s\n' % (source, destination))
1763 1763
@command('debugp2copies',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV]'))
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""

    # The function was previously (mistakenly) also named `debugp1copies`,
    # which shadowed the p1 variant defined just above at module level.
    # The command registration itself was always 'debugp2copies', so the
    # rename only fixes the module namespace collision.
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    for dst, src in ctx.p2copies().items():
        ui.write('%s -> %s\n' % (src, dst))
1774 1774
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        # Resolve the spec to a repo-relative, '/'-separated path prefix.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        # Dirstate stores paths with '/'; convert only on platforms where
        # the OS separator differs.
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts[r'full']
        files, dirs = set(), set()
        for f, st in dirstate.iteritems():
            if not (f.startswith(spec) and st[0] in acceptable):
                continue
            if fixpaths:
                f = f.replace('/', pycompat.ossep)
            if fullpaths:
                files.add(f)
            else:
                # Without --full, stop at the next path segment: matches
                # containing a further separator complete as directories.
                sep = f.find(pycompat.ossep, speclen)
                if sep >= 0:
                    dirs.add(f[:sep])
                else:
                    files.add(f)
        return files, dirs

    # Build the set of acceptable dirstate states from the filter flags;
    # with no flags everything ('nmar') is accepted.
    acceptable = ''
    if opts[r'normal']:
        acceptable += 'nm'
    if opts[r'added']:
        acceptable += 'a'
    if opts[r'removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    allfiles, alldirs = set(), set()
    for spec in specs:
        matchedfiles, matcheddirs = complete(spec, acceptable or 'nmar')
        allfiles.update(matchedfiles)
        alldirs.update(matcheddirs)
    allfiles.update(alldirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(allfiles)))
    ui.write('\n')
1839 1839
@command('debugpathcopies',
         cmdutil.walkopts,
         'hg debugpathcopies REV1 REV2 [FILE]',
         inferrepo=True)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    ctx1 = scmutil.revsingle(repo, rev1)
    ctx2 = scmutil.revsingle(repo, rev2)
    matcher = scmutil.match(ctx1, pats, opts)
    # pathcopies maps destination -> source; print sorted by destination.
    copymap = copies.pathcopies(ctx1, ctx2, matcher)
    for destination, source in sorted(copymap.items()):
        ui.write('%s -> %s\n' % (source, destination))
1851 1851
@command('debugpeer', [], _('PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Force peer request logging on; the log lines themselves only show
    # up when --debug is in effect.
    overrides = {('devel', 'debug.peer-request'): True}

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        islocal = peer.local() is not None
        pushable = peer.canpush()

        ui.write(_('url: %s\n') % peer.url())
        ui.write(_('local: %s\n') % (_('yes') if islocal else _('no')))
        ui.write(_('pushable: %s\n') % (_('yes') if pushable else _('no')))
1870 1870
@command('debugpickmergetool',
         [('r', 'rev', '', _('check for files in this revision'), _('REV')),
          ('', 'changedelete', None, _('emulate merging change and delete')),
         ] + cmdutil.walkopts + cmdutil.mergetoolopts,
         _('[PATTERN]...'),
         inferrepo=True)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    forced = opts['tool']
    if forced:
        overrides[('ui', 'forcemerge')] = forced
        ui.note(('with --tool %r\n') % (pycompat.bytestr(forced)))

    with ui.configoverride(overrides, 'debugmergepatterns'):
        # Report (verbose) the inputs that can short-circuit tool selection.
        hgmerge = encoding.environ.get("HGMERGE")
        if hgmerge is not None:
            ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config("ui", "merge")
        if uimerge:
            ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get('rev'))
        matcher = scmutil.match(ctx, pats, opts)
        changedelete = opts['changedelete']
        for path in ctx.walk(matcher):
            filectx = ctx[path]
            # Without --debug, swallow _picktool's chatter so only the
            # "FILE = TOOL" lines are printed.
            buffered = not ui.debugflag
            try:
                if buffered:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(repo, ui, path,
                                                     filectx.isbinary(),
                                                     'l' in filectx.flags(),
                                                     changedelete)
            finally:
                if buffered:
                    ui.popbuffer()
            ui.write(('%s = %s\n') % (path, tool))
1949 1949
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if not keyinfo:
        # Listing mode: dump every key/value pair in the namespace.
        for k, v in sorted(target.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (stringutil.escapestr(k),
                                   stringutil.escapestr(v)))
    else:
        # Update mode: compare-and-set via the pushkey wire command.
        key, old, new = keyinfo
        with target.commandexecutor() as executor:
            result = executor.callcommand('pushkey', {
                'namespace': namespace,
                'key': key,
                'old': old,
                'new': new,
            }).result()

        ui.status(pycompat.bytestr(result) + '\n')
        # Exit status 0 on success (truthy result), 1 otherwise.
        return not result
1977 1977
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    # Compare the parent-vectors of two revisions and describe their
    # relationship (=, >, <, or | for divergent).
    ctxa = scmutil.revsingle(repo, a)
    ctxb = scmutil.revsingle(repo, b)
    pveca = pvec.ctxpvec(ctxa)
    pvecb = pvec.ctxpvec(ctxb)
    # NOTE(review): if none of these comparisons is true, `relation` is
    # never bound and the final write raises — presumably the four cases
    # are exhaustive for pvecs; confirm against the pvec module.
    if pveca == pvecb:
        relation = "="
    elif pveca > pvecb:
        relation = ">"
    elif pveca < pvecb:
        relation = "<"
    elif pveca | pvecb:
        relation = "|"
    ui.write(_("a: %s\n") % pveca)
    ui.write(_("b: %s\n") % pvecb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pveca._depth, pvecb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pveca._depth - pvecb._depth),
              pvec._hamming(pveca._vec, pvecb._vec),
              pveca.distance(pvecb), relation))
1998 1998
@command('debugrebuilddirstate|debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                             'the working copy parent')),
    ],
    _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        ds = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get(r'minimal'):
            inmanifest = set(ctx.manifest().keys())
            indirstate = set(ds)
            # Files the manifest knows about but the dirstate does not.
            manifestonly = inmanifest - indirstate
            # Dirstate-only files, minus those recorded as added.
            dsonly = indirstate - inmanifest
            dsnotadded = {f for f in dsonly if ds[f] != 'a'}
            changedfiles = manifestonly | dsnotadded

        ds.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2036 2036
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # Thin wrapper: all of the work happens in repair.rebuildfncache.
    repair.rebuildfncache(ui, repo)
2041 2041
@command('debugrename',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV] [FILE]...'))
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, pats, opts)
    for abspath in ctx.walk(matcher):
        fctx = ctx[abspath]
        # filelog().renamed() yields (source path, source filenode) or a
        # false value when the file was not renamed.
        renamed = fctx.filelog().renamed(fctx.filenode())
        relpath = repo.pathto(abspath)
        if not renamed:
            ui.write(_("%s not renamed\n") % relpath)
        else:
            ui.write(_("%s renamed from %s:%s\n")
                     % (relpath, renamed[0], hex(renamed[1])))
2059 2059
@command('debugrevlog', cmdutil.debugrevlogopts +
    [('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    # Resolve -c/-m/FILE to a revlog(-like) object.
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    if opts.get("dump"):
        # --dump: emit one raw table row per revision instead of the
        # aggregate statistics below, then return early.
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start   end deltastart base   p1   p2"
                 " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            # ts: running total of raw (uncompressed) sizes so far.
            ts = ts + rs
            # Track current heads: drop this rev's parents, add the rev.
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                # NOTE(review): '/' is integer division on py2 but true
                # division on py3, so this column's precision differs.
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    # Decode the revlog version word into format number + feature flags.
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append('inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks ways the "delta" are build
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about delta chain of each revs
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision; each list is [min, max, total].
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        # Fold `size` into a [min, max, total] accumulator.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # Stored as a full snapshot (or empty text): chain starts here.
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            # Stored as a delta: extend the delta chain bookkeeping.
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                # Classify which revision this delta was computed against.
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, '_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            # First byte of the stored chunk identifies the compression
            # engine (e.g. zlib/none markers).
            chunktype = bytes(segment[0:1])
        else:
            chunktype = 'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # Turn the [2] totals into averages in place (py2 integer division).
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    # Column-width-aware format string builders for the report below.
    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        # Return (value, percentage-of-total) for the pcfmtstr formats.
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.write(('format : %d\n') % format)
    ui.write(('flags  : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions     : ') + fmt2 % numrevs)
    ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions     : ') + fmt2 % numrevs)
    ui.write(('    empty     : ') + fmt % pcfmt(numempty, numrevs))
    ui.write(('                   text  : ')
             + fmt % pcfmt(numemptytext, numemptytext + numemptydelta))
    ui.write(('                   delta : ')
             + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta))
    ui.write(('    snapshot  : ') + fmt % pcfmt(numfull + numsemi, numrevs))
    for depth in sorted(numsnapdepth):
        ui.write(('      lvl-%-3d :       ' % depth)
                 + fmt % pcfmt(numsnapdepth[depth], numrevs))
    ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write(('    snapshot  : ')
             + fmt % pcfmt(fulltotal + semitotal, totalsize))
    for depth in sorted(numsnapdepth):
        ui.write(('      lvl-%-3d :       ' % depth)
                 + fmt % pcfmt(snaptotal[depth], totalsize))
    ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        if chunktype == 'empty':
            return '    %s     : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return '    0x%s (%s)  : ' % (hex(chunktype), chunktype)
        else:
            return '    0x%s      : ' % hex(chunktype)

    ui.write('\n')
    ui.write(('chunks        : ') + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.write(('chunks size   : ') + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.write(('avg chain length  : ') + fmt % avgchainlen)
    ui.write(('max chain length  : ') + fmt % maxchainlen)
    ui.write(('max chain reach   : ') + fmt % maxchainspan)
    ui.write(('compression ratio : ') + fmt % compratio)

    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('inter-snapshot size (min/max/avg)    : %d / %d / %d\n')
                 % tuple(semisize))
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.write(('    level-%-3d (min/max/avg)          : %d / %d / %d\n')
                     % ((depth,) + tuple(snapsizedepth[depth])))
        ui.write(('delta size (min/max/avg)             : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev  : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write(('    where prev = p1  : ') + fmt2 % pcfmt(nump1prev,
                                                              numprev))
            ui.write(('    where prev = p2  : ') + fmt2 % pcfmt(nump2prev,
                                                              numprev))
            ui.write(('    other            : ') + fmt2 % pcfmt(numoprev,
                                                              numprev))
        if gdelta:
            ui.write(('deltas against p1    : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2    : ')
                     + fmt % pcfmt(nump2, numdeltas))
            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                               numdeltas))
2359 2359
@command('debugrevlogindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlogindex', file_, opts)
    # Only two table layouts are supported: 0 (legacy) and 1.
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    # Full 40-char hashes with --debug, abbreviated ones otherwise.
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        # Width of the node columns, taken from the first node present.
        idlen = len(shortfn(r.node(i)))
        break

    # Print the header row matching the chosen format/verbosity.
    if format == 0:
        if ui.verbose:
            ui.write(("   rev    offset  length linkrev"
                     " %s %s p2\n") % ("nodeid".ljust(idlen),
                                       "p1".ljust(idlen)))
        else:
            ui.write(("   rev linkrev %s %s p2\n") % (
                "nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        if ui.verbose:
            ui.write(("   rev flag   offset   length     size   link     p1"
                      "     p2 %s\n") % "nodeid".rjust(idlen))
        else:
            ui.write(("   rev flag     size   link     p1     p2 %s\n") %
                     "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # Fall back to null parents if the lookup fails.
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
                        i, r.start(i), r.length(i), r.linkrev(i),
                        shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
            else:
                ui.write("% 6d % 7d %s %s %s\n" % (
                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
                    shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
                        i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                        r.linkrev(i), pr[0], pr[1], shortfn(node)))
            else:
                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
                    shortfn(node)))
2424 2424
@command('debugrevspec',
    [('', 'optimize', None,
      _('print parsed tree after optimizing (DEPRECATED)')),
     ('', 'show-revs', True, _('print list of result revisions (default)')),
     ('s', 'show-set', None, _('print internal representation of result set')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME')),
     ('', 'no-optimized', False, _('evaluate tree without optimization')),
     ('', 'verify-optimized', False, _('verify optimized result')),
     ],
    ('REVSPEC'))
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems('revsetalias')
    # The revset compilation pipeline, in order; each stage transforms
    # the tree produced by the previous one.
    stages = [
        ('parsed', lambda tree: tree),
        ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
                                                           ui.warn)),
        ('concatenated', revsetlang.foldconcat),
        ('analyzed', revsetlang.analyze),
        ('optimized', revsetlang.optimize),
    ]
    if opts['no_optimized']:
        # Drop the final 'optimized' stage.
        stages = stages[:-1]
    if opts['verify_optimized'] and opts['no_optimized']:
        raise error.Abort(_('cannot use --verify-optimized with '
                            '--no-optimized'))
    stagenames = set(n for n, f in stages)

    # Which stage trees to print: always, or only when they changed the
    # tree compared to the previously printed one.
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
        showchanged.update(['expanded', 'concatenated'])
    if opts['optimize']:
        showalways.add('optimized')
    if opts['show_stage'] and opts['optimize']:
        raise error.Abort(_('cannot use --optimize with --show-stage'))
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    # Run the pipeline, remembering each stage's tree for later use.
    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(revsetlang.prettyformat(tree), "\n")
            printedtree = tree

    if opts['verify_optimized']:
        # Evaluate both the analyzed and the optimized trees and diff
        # the resulting revision sequences; exit 1 on any difference.
        arevs = revset.makematcher(treebystage['analyzed'])(repo)
        brevs = revset.makematcher(treebystage['optimized'])(repo)
        if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
            ui.write(("* analyzed set:\n"), stringutil.prettyrepr(arevs), "\n")
            ui.write(("* optimized set:\n"), stringutil.prettyrepr(brevs), "\n")
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        ui.write(('--- analyzed\n'), label='diff.file_a')
        ui.write(('+++ optimized\n'), label='diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in (r'delete', r'replace'):
                for c in arevs[alo:ahi]:
                    ui.write('-%d\n' % c, label='diff.deleted')
            if tag in (r'insert', r'replace'):
                for c in brevs[blo:bhi]:
                    ui.write('+%d\n' % c, label='diff.inserted')
            if tag == r'equal':
                for c in arevs[alo:ahi]:
                    ui.write(' %d\n' % c)
        return 1

    # Normal path: evaluate the final tree and print the revisions.
    func = revset.makematcher(tree)
    revs = func(repo)
    if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
        ui.write(("* set:\n"), stringutil.prettyrepr(revs), "\n")
    if not opts['show_revs']:
        return
    for c in revs:
        ui.write("%d\n" % c)
2527 2527
@command('debugserve', [
    ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
    ('', 'logiofd', '', _('file descriptor to log server I/O to')),
    ('', 'logiofile', '', _('file to log server I/O to')),
], '')
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts['sshstdio']:
        raise error.Abort(_('only --sshstdio is currently supported'))

    if opts['logiofd'] and opts['logiofile']:
        raise error.Abort(_('cannot use both --logiofd and --logiofile'))

    logfh = None
    if opts['logiofd']:
        # Line buffered because output is line based.
        fd = int(opts['logiofd'])
        try:
            logfh = os.fdopen(fd, r'ab', 1)
        except OSError as err:
            if err.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(fd, r'wb', 1)
    elif opts['logiofile']:
        logfh = open(opts['logiofile'], 'ab', 1)

    server = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    server.serve_forever()
2564 2564
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """
    # Resolve both revisions up front; rev2 defaults to the null revision.
    parent1 = scmutil.revsingle(repo, rev1).node()
    parent2 = scmutil.revsingle(repo, rev2, 'null').node()

    with repo.wlock():
        repo.setparents(parent1, parent2)
2582 2582
@command('debugssl', [], '[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    '''test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    '''
    if not pycompat.iswindows:
        raise error.Abort(_('certificate chain building is only possible on '
                            'Windows'))

    if not source:
        if not repo:
            raise error.Abort(_("there is no Mercurial repository here, and no "
                                "server specified"))
        source = "default"

    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)

    # Only schemes with a well-known default port are supported.
    defaultport = {'https': 443, 'ssh': 22}
    if url.scheme not in defaultport:
        raise error.Abort(_("only https and ssh connections are supported"))
    try:
        addr = (url.host, int(url.port or defaultport[url.scheme]))
    except ValueError:
        raise error.Abort(_("malformed port number in URL"))

    from . import win32

    # Certificate validation is intentionally disabled: we only want the
    # peer's certificate so win32 can build/repair the chain.
    sock = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
                           cert_reqs=ssl.CERT_NONE, ca_certs=None)

    try:
        sock.connect(addr)
        cert = sock.getpeercert(True)

        ui.status(_('checking the certificate chain for %s\n') % url.host)

        complete = win32.checkcertificatechain(cert, build=False)

        if complete:
            ui.status(_('full certificate chain is available\n'))
        else:
            ui.status(_('certificate chain is incomplete, updating... '))
            if win32.checkcertificatechain(cert):
                ui.status(_('done.\n'))
            else:
                ui.status(_('failed.\n'))
    finally:
        sock.close()
2643 2643
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    """print the substate (path, source, revision) of each subrepo"""
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.write(('path %s\n') % path)
        ui.write((' source %s\n') % state[0])
        ui.write((' revision %s\n') % state[1])
2654 2654
@command('debugsuccessorssets',
    [('', 'closest', False, _('return closest successors sets only'))],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n' % bytes(ctx))
        succsets = obsutil.successorssets(repo, ctx.node(),
                                          closest=opts[r'closest'],
                                          cache=cache)
        for succsset in succsets:
            # one indented line per set; empty sets print as a bare newline
            for node in succsset:
                ui.write(' ')
                ui.write(short(node))
            ui.write('\n')
2707 2707
@command('debugtemplate',
    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts[r'rev']:
        if repo is None:
            raise error.RepoError(_('there is no Mercurial repository here '
                                    '(.hg not found)'))
        revs = scmutil.revrange(repo, opts[r'rev'])

    # Parse -D KEY=VALUE definitions; a missing '=' triggers the same
    # ValueError path as an empty or reserved key.
    props = {}
    for definition in opts[r'define']:
        try:
            key, value = (part.strip() for part in definition.split('=', 1))
            if not key or key == 'ui':
                raise ValueError
            props[key] = value
        except ValueError:
            raise error.Abort(_('malformed keyword definition: %s')
                              % definition)

    if ui.verbose:
        aliases = ui.configitems('templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), '\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')

    def _showsymbols(t):
        # verbose dump of symbols used, shared by both template flavors
        kwds, funcs = t.symbolsuseddefault()
        ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
        ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))

    if revs is None:
        # Generic template: render once with the user-supplied properties.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            _showsymbols(t)
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested changeset.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            _showsymbols(displayer.t)
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
2764 2764
@command('debuguigetpass', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguigetpass(ui, prompt=''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    # Label was misspelled 'respose'; fixed to match debuguiprompt's output.
    ui.write(('response: %s\n') % r)
2772 2772
@command('debuguiprompt', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguiprompt(ui, prompt=''):
    """show plain prompt"""
    answer = ui.prompt(prompt)
    ui.write(('response: %s\n') % answer)
2780 2780
@command('debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Take both locks: some caches are written under the store lock while
    # others (e.g. working-copy related ones) require the wlock.
    with repo.wlock():
        with repo.lock():
            repo.updatecaches(full=True)
2786 2786
@command('debugupgraderepo', [
    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
    ('', 'run', False, _('performs an upgrade')),
    ('', 'backup', True, _('keep the old repository content around')),
])
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.
    """
    # Thin CLI wrapper: all planning/execution logic lives in the upgrade
    # module; options are forwarded verbatim.
    return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize,
                               backup=backup)
2813 2813
@command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
         inferrepo=True)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    # Optionally normalize path separators for display.
    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
        display = lambda fn: util.normpath(fn)
    else:
        display = lambda fn: fn
    # Column widths are sized to the longest absolute and relative paths.
    widestabs = max(len(abs) for abs in items)
    widestrel = max(len(repo.pathto(abs)) for abs in items)
    fmt = 'f %%-%ds %%-%ds %%s' % (widestabs, widestrel)
    for abs in items:
        line = fmt % (abs, display(repo.pathto(abs)),
                      m.exact(abs) and 'exact' or '')
        ui.write("%s\n" % line.rstrip())
2834 2834
@command('debugwhyunstable', [], _('REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        dnodes = ''
        divergent = entry.get('divergentnodes')
        if divergent:
            parts = ['%s (%s)' % (ctx.hex(), ctx.phasestr())
                     for ctx in divergent]
            dnodes = ' '.join(parts) + ' '
        ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
                                    entry['reason'], entry['node']))
2845 2845
@command('debugwireargs',
    [('', 'three', '', 'three'),
     ('', 'four', '', 'four'),
     ('', 'five', '', 'five'),
    ] + cmdutil.remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True)
def debugwireargs(ui, repopath, *vals, **opts):
    """exercise argument passing over the wire protocol"""
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    for opt in cmdutil.remoteopts:
        del opts[opt[1]]
    # Only forward options that were actually set.
    args = pycompat.strkwargs({k: v for k, v in opts.iteritems() if v})
    # run twice to check that we don't mess up the stream for the next command
    res1 = repo.debugwireargs(*vals, **args)
    res2 = repo.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)
2869 2869
2870 2870 def _parsewirelangblocks(fh):
2871 2871 activeaction = None
2872 2872 blocklines = []
2873 2873 lastindent = 0
2874 2874
2875 2875 for line in fh:
2876 2876 line = line.rstrip()
2877 2877 if not line:
2878 2878 continue
2879 2879
2880 2880 if line.startswith(b'#'):
2881 2881 continue
2882 2882
2883 2883 if not line.startswith(b' '):
2884 2884 # New block. Flush previous one.
2885 2885 if activeaction:
2886 2886 yield activeaction, blocklines
2887 2887
2888 2888 activeaction = line
2889 2889 blocklines = []
2890 2890 lastindent = 0
2891 2891 continue
2892 2892
2893 2893 # Else we start with an indent.
2894 2894
2895 2895 if not activeaction:
2896 2896 raise error.Abort(_('indented line outside of block'))
2897 2897
2898 2898 indent = len(line) - len(line.lstrip())
2899 2899
2900 2900 # If this line is indented more than the last line, concatenate it.
2901 2901 if indent > lastindent and blocklines:
2902 2902 blocklines[-1] += line.lstrip()
2903 2903 else:
2904 2904 blocklines.append(line)
2905 2905 lastindent = indent
2906 2906
2907 2907 # Flush last block.
2908 2908 if activeaction:
2909 2909 yield activeaction, blocklines
2910 2910
@command('debugwireproto',
    [
        ('', 'localssh', False, _('start an SSH server for this repo')),
        ('', 'peer', '', _('construct a specific version of the peer')),
        ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
        ('', 'nologhandshake', False,
         _('do not log I/O related to the peer handshake')),
    ] + cmdutil.remoteopts,
    _('[PATH]'),
    optionalrepo=True)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
    raw data payloads and don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is specified via stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.

    raw+
    ----

    Behaves like ``raw`` except flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

       command listkeys
           namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` N bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` N bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit a *Unified Frame-Based Protocol* by using special
    syntax.

    A frame is composed as a type, flags, and payload. These can be parsed
    from a string of the form:

       <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
    """
    opts = pycompat.byteskwargs(opts)

    if opts['localssh'] and not repo:
        raise error.Abort(_('--localssh requires a repository'))

    if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
        raise error.Abort(_('invalid value for --peer'),
                          hint=_('valid values are "raw", "ssh1", and "ssh2"'))

    if path and opts['localssh']:
        raise error.Abort(_('cannot specify --localssh with an explicit '
                            'path'))

    if ui.interactive():
        ui.write(_('(waiting for commands on stdin)\n'))

    blocks = list(_parsewirelangblocks(ui.fin))

    proc = None
    stdin = None
    stdout = None
    stderr = None
    opener = None

    if opts['localssh']:
        # We start the SSH server in its own process so there is process
        # separation. This prevents a whole class of potential bugs around
        # shared state from interfering with server operation.
        args = procutil.hgcmd() + [
            '-R', repo.root,
            'debugserve', '--sshstdio',
        ]
        proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                bufsize=0)

        stdin = proc.stdin
        stdout = proc.stdout
        stderr = proc.stderr

        # We turn the pipes into observers so we can log I/O.
        if ui.verbose or opts['peer'] == 'raw':
            stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
                                               logdata=True)
            stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
                                                logdata=True)
            stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
                                                logdata=True)

        # --localssh also implies the peer connection settings.

        url = 'ssh://localserver'
        autoreadstderr = not opts['noreadstderr']

        if opts['peer'] == 'ssh1':
            ui.write(_('creating ssh peer for wire protocol version 1\n'))
            peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
                                     None, autoreadstderr=autoreadstderr)
        elif opts['peer'] == 'ssh2':
            ui.write(_('creating ssh peer for wire protocol version 2\n'))
            peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
                                     None, autoreadstderr=autoreadstderr)
        elif opts['peer'] == 'raw':
            ui.write(_('using raw connection to peer\n'))
            peer = None
        else:
            ui.write(_('creating ssh peer from handshake results\n'))
            peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
                                    autoreadstderr=autoreadstderr)

    elif path:
        # We bypass hg.peer() so we can proxy the sockets.
        # TODO consider not doing this because we skip
        # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
        u = util.url(path)
        if u.scheme != 'http':
            raise error.Abort(_('only http:// paths are currently supported'))

        url, authinfo = u.authinfo()
        openerargs = {
            r'useragent': b'Mercurial debugwireproto',
        }

        # Turn pipes/sockets into observers so we can log I/O.
        if ui.verbose:
            openerargs.update({
                r'loggingfh': ui,
                r'loggingname': b's',
                r'loggingopts': {
                    r'logdata': True,
                    r'logdataapis': False,
                },
            })

        if ui.debugflag:
            openerargs[r'loggingopts'][r'logdataapis'] = True

        # Don't send default headers when in raw mode. This allows us to
        # bypass most of the behavior of our URL handling code so we can
        # have near complete control over what's sent on the wire.
        if opts['peer'] == 'raw':
            openerargs[r'sendaccept'] = False

        opener = urlmod.opener(ui, authinfo, **openerargs)

        if opts['peer'] == 'http2':
            ui.write(_('creating http peer for wire protocol version 2\n'))
            # We go through makepeer() because we need an API descriptor for
            # the peer instance to be useful.
            with ui.configoverride({
                ('experimental', 'httppeer.advertise-v2'): True}):
                if opts['nologhandshake']:
                    ui.pushbuffer()

                peer = httppeer.makepeer(ui, path, opener=opener)

                if opts['nologhandshake']:
                    ui.popbuffer()

            if not isinstance(peer, httppeer.httpv2peer):
                raise error.Abort(_('could not instantiate HTTP peer for '
                                    'wire protocol version 2'),
                                  hint=_('the server may not have the feature '
                                         'enabled or is not allowing this '
                                         'client version'))

        elif opts['peer'] == 'raw':
            ui.write(_('using raw connection to peer\n'))
            peer = None
        elif opts['peer']:
            raise error.Abort(_('--peer %s not supported with HTTP peers') %
                              opts['peer'])
        else:
            peer = httppeer.makepeer(ui, path, opener=opener)

        # We /could/ populate stdin/stdout with sock.makefile()...
    else:
        raise error.Abort(_('unsupported connection configuration'))

    batchedcommands = None

    # Now perform actions based on the parsed wire language instructions.
    for action, lines in blocks:
        if action in ('raw', 'raw+'):
            if not stdin:
                raise error.Abort(_('cannot call raw/raw+ on this peer'))

            # Concatenate the data together.
            data = ''.join(l.lstrip() for l in lines)
            data = stringutil.unescapestr(data)
            stdin.write(data)

            if action == 'raw+':
                stdin.flush()
        elif action == 'flush':
            if not stdin:
                raise error.Abort(_('cannot call flush on this peer'))
            stdin.flush()
        elif action.startswith('command'):
            if not peer:
                raise error.Abort(_('cannot send commands unless peer instance '
                                    'is available'))

            command = action.split(' ', 1)[1]

            args = {}
            for line in lines:
                # We need to allow empty values.
                fields = line.lstrip().split(' ', 1)
                if len(fields) == 1:
                    key = fields[0]
                    value = ''
                else:
                    key, value = fields

                if value.startswith('eval:'):
                    value = stringutil.evalpythonliteral(value[5:])
                else:
                    value = stringutil.unescapestr(value)

                args[key] = value

            if batchedcommands is not None:
                batchedcommands.append((command, args))
                continue

            ui.status(_('sending %s command\n') % command)

            if 'PUSHFILE' in args:
                with open(args['PUSHFILE'], r'rb') as fh:
                    del args['PUSHFILE']
                    res, output = peer._callpush(command, fh,
                                                 **pycompat.strkwargs(args))
                    ui.status(_('result: %s\n') % stringutil.escapestr(res))
                    ui.status(_('remote output: %s\n') %
                              stringutil.escapestr(output))
            else:
                with peer.commandexecutor() as e:
                    res = e.callcommand(command, args).result()

                if isinstance(res, wireprotov2peer.commandresponse):
                    val = res.objects()
                    ui.status(_('response: %s\n') %
                              stringutil.pprint(val, bprefix=True, indent=2))
                else:
                    ui.status(_('response: %s\n') %
                              stringutil.pprint(res, bprefix=True, indent=2))

        elif action == 'batchbegin':
            if batchedcommands is not None:
                raise error.Abort(_('nested batchbegin not allowed'))

            batchedcommands = []
        elif action == 'batchsubmit':
            # There is a batching API we could go through. But it would be
            # difficult to normalize requests into function calls. It is easier
            # to bypass this layer and normalize to commands + args.
            ui.status(_('sending batch with %d sub-commands\n') %
                      len(batchedcommands))
            for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                ui.status(_('response #%d: %s\n') %
                          (i, stringutil.escapestr(chunk)))

            batchedcommands = None

        elif action.startswith('httprequest '):
            if not opener:
                raise error.Abort(_('cannot use httprequest without an HTTP '
                                    'peer'))

            request = action.split(' ', 2)
            if len(request) != 3:
                raise error.Abort(_('invalid httprequest: expected format is '
                                    '"httprequest <method> <path>'))

            method, httppath = request[1:]
            headers = {}
            body = None
            frames = []
            for line in lines:
                line = line.lstrip()
                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                if m:
                    # Headers need to use native strings.
                    key = pycompat.strurl(m.group(1))
                    value = pycompat.strurl(m.group(2))
                    headers[key] = value
                    continue

                if line.startswith(b'BODYFILE '):
                    # split() returns [b'BODYFILE', <path>]; we must pass the
                    # path element to open() — passing the whole list raised
                    # TypeError before this fix.
                    with open(line.split(b' ', 1)[1], 'rb') as fh:
                        body = fh.read()
                elif line.startswith(b'frame '):
                    frame = wireprotoframing.makeframefromhumanstring(
                        line[len(b'frame '):])

                    frames.append(frame)
                else:
                    raise error.Abort(_('unknown argument to httprequest: %s') %
                                      line)

            url = path + httppath

            if frames:
                body = b''.join(bytes(f) for f in frames)

            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)

            # urllib.Request insists on using has_data() as a proxy for
            # determining the request method. Override that to use our
            # explicitly requested method.
            req.get_method = lambda: pycompat.sysstr(method)

            try:
                res = opener.open(req)
                body = res.read()
            except util.urlerr.urlerror as e:
                # read() method must be called, but only exists in Python 2
                getattr(e, 'read', lambda: None)()
                continue

            ct = res.headers.get(r'Content-Type')
            if ct == r'application/mercurial-cbor':
                ui.write(_('cbor> %s\n') %
                         stringutil.pprint(cborutil.decodeall(body),
                                           bprefix=True,
                                           indent=2))

        elif action == 'close':
            peer.close()
        elif action == 'readavailable':
            if not stdout or not stderr:
                raise error.Abort(_('readavailable not available on this peer'))

            stdin.close()
            stdout.read()
            stderr.read()

        elif action == 'readline':
            if not stdout:
                raise error.Abort(_('readline not available on this peer'))
            stdout.readline()
        elif action == 'ereadline':
            if not stderr:
                raise error.Abort(_('ereadline not available on this peer'))
            stderr.readline()
        elif action.startswith('read '):
            count = int(action.split(' ', 1)[1])
            if not stdout:
                raise error.Abort(_('read not available on this peer'))
            stdout.read(count)
        elif action.startswith('eread '):
            count = int(action.split(' ', 1)[1])
            if not stderr:
                raise error.Abort(_('eread not available on this peer'))
            stderr.read(count)
        else:
            raise error.Abort(_('unknown action: %s') % action)

    if batchedcommands is not None:
        raise error.Abort(_('unclosed "batchbegin" request'))

    if peer:
        peer.close()

    if proc:
        proc.kill()
@@ -1,2055 +1,2055 b''
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import itertools
12 12 import struct
13 13 import weakref
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 bin,
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 )
22 22 from . import (
23 23 error,
24 24 mdiff,
25 25 policy,
26 26 pycompat,
27 27 repository,
28 28 revlog,
29 29 util,
30 30 )
31 31 from .utils import (
32 32 interfaceutil,
33 33 )
34 34
35 35 parsers = policy.importmod(r'parsers')
36 36 propertycache = util.propertycache
37 37
def _parse(data):
    """Parse raw manifest text, yielding (path, node, flags) tuples.

    The up-front validation deliberately mirrors the C parser so the
    pure-Python fallback raises in exactly the same situations.
    """
    if data and data[-1:] != '\n':
        raise ValueError('Manifest did not end in a newline.')
    prevline = None
    for line in data.splitlines():
        if prevline is not None and prevline > line:
            raise ValueError('Manifest lines not in sorted order.')
        prevline = line
        path, hexnode = line.split('\0')
        if len(hexnode) > 40:
            # bytes past the 40-char hash are per-file flag characters
            yield path, bin(hexnode[:40]), hexnode[40:]
        else:
            yield path, bin(hexnode), ''
56 56
def _text(it):
    """Serialize an iterable of (path, node, flags) into raw manifest text."""
    paths = []
    lines = []
    for path, node, flags in it:
        paths.append(path)
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        lines.append("%s\0%s%s\n" % (path, hex(node), flags))
    # validate only after formatting, preserving the original failure order
    _checkforbidden(paths)
    return ''.join(lines)
68 68
class lazymanifestiter(object):
    """Iterator over the file names stored in a _lazymanifest."""

    def __init__(self, lm):
        self.pos = 0
        self.lm = lm

    def __iter__(self):
        return self

    def next(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        self.pos += 1
        if pos == -1:
            # pending entry stored in extradata as (name, node, flags)
            return data[0]
        # entry is a slice of the raw manifest text; the name ends at NUL
        return data[pos:data.find('\x00', pos)]

    __next__ = next
90 90
class lazymanifestiterentries(object):
    """Iterator over (name, node, flags) entries of a _lazymanifest."""

    def __init__(self, lm):
        self.lm = lm
        self.pos = 0

    def __iter__(self):
        return self

    def next(self):
        index = self.pos
        try:
            data, pos = self.lm._get(index)
        except IndexError:
            raise StopIteration
        self.pos = index + 1
        if pos == -1:
            # pending entry: already a (name, node, flags) tuple
            return data
        nul = data.find('\x00', pos)
        # the 40-char hex node follows the NUL; extrainfo may carry a
        # 21st raw byte for this entry
        node = unhexlify(data, self.lm.extrainfo[index], nul + 1, 40)
        flags = self.lm._getflags(data, index, nul)
        return (data[pos:nul], node, flags)

    __next__ = next
115 115
def unhexlify(data, extra, pos, length):
    """Decode the hex node at data[pos:pos + length].

    When 'extra' is non-zero it carries a 21st raw byte recorded for the
    entry, which is appended to the decoded node.
    """
    node = bin(data[pos:pos + length])
    if extra:
        node += chr(extra & 0xff)
    return node
121 121
122 122 def _cmp(a, b):
123 123 return (a > b) - (a < b)
124 124
class _lazymanifest(object):
    """Pure-Python fallback for the C lazymanifest.

    The manifest text is kept as one flat string ('data'); 'positions'
    holds, for each entry in sorted order, either a non-negative offset
    into 'data' or a negative index (-i - 1) into 'extradata', which
    stores not-yet-serialized (key, node, flags) tuples.  'extrainfo'
    carries an optional 21st node byte per entry.  _compact() folds
    extradata back into the flat text.
    """

    def __init__(self, data, positions=None, extrainfo=None, extradata=None):
        if positions is None:
            # fresh parse of raw manifest text
            self.positions = self.findlines(data)
            self.extrainfo = [0] * len(self.positions)
            self.data = data
            self.extradata = []
        else:
            # copy constructor: shallow-copy the bookkeeping lists so the
            # new instance can diverge
            self.positions = positions[:]
            self.extrainfo = extrainfo[:]
            self.extradata = extradata[:]
            self.data = data

    def findlines(self, data):
        """Return the offset of every line start, validating the text."""
        if not data:
            return []
        pos = data.find("\n")
        if pos == -1 or data[-1:] != '\n':
            raise ValueError("Manifest did not end in a newline.")
        positions = [0]
        prev = data[:data.find('\x00')]
        while pos < len(data) - 1 and pos != -1:
            positions.append(pos + 1)
            nexts = data[pos + 1:data.find('\x00', pos + 1)]
            if nexts < prev:
                raise ValueError("Manifest lines not in sorted order.")
            prev = nexts
            pos = data.find("\n", pos + 1)
        return positions

    def _get(self, index):
        """Return (data, pos) for entry 'index'; pos == -1 means 'data' is
        an extradata (key, node, flags) tuple rather than raw text."""
        # get the position encoded in pos:
        #   positive number is an index in 'data'
        #   negative number is in extrapieces
        pos = self.positions[index]
        if pos >= 0:
            return self.data, pos
        return self.extradata[-pos - 1], -1

    def _getkey(self, pos):
        """Return the file name for a raw 'positions' value."""
        if pos >= 0:
            return self.data[pos:self.data.find('\x00', pos + 1)]
        return self.extradata[-pos - 1][0]

    def bsearch(self, key):
        """Binary search; return the entry index of 'key' or -1."""
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last)//2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return midpoint
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return -1

    def bsearch2(self, key):
        """Like bsearch, but return (index, found) so a miss still yields
        the sorted insertion point."""
        # same as the above, but will always return the position
        # done for performance reasons
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last)//2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return (midpoint, True)
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return (first, False)

    def __contains__(self, key):
        return self.bsearch(key) != -1

    def _getflags(self, data, needle, pos):
        """Return the flags portion of the entry whose NUL is at 'pos'."""
        # name NUL + 40 hex chars => flags start at pos + 41
        start = pos + 41
        end = data.find("\n", start)
        if end == -1:
            end = len(data) - 1
        if start == end:
            return ''
        # NOTE(review): slices self.data although 'data' was searched above;
        # all current callers pass data is self.data, so behavior matches —
        # confirm before calling with any other buffer.
        return self.data[start:end]

    def __getitem__(self, key):
        """Return (node, flags) for 'key'; raise KeyError if absent."""
        if not isinstance(key, bytes):
            raise TypeError("getitem: manifest keys must be a bytes.")
        needle = self.bsearch(key)
        if needle == -1:
            raise KeyError
        data, pos = self._get(needle)
        if pos == -1:
            # pending entry: (key, node, flags) tuple
            return (data[1], data[2])
        zeropos = data.find('\x00', pos)
        assert 0 <= needle <= len(self.positions)
        assert len(self.extrainfo) == len(self.positions)
        hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
        flags = self._getflags(data, needle, zeropos)
        return (hashval, flags)

    def __delitem__(self, key):
        needle, found = self.bsearch2(key)
        if not found:
            raise KeyError
        cur = self.positions[needle]
        self.positions = self.positions[:needle] + self.positions[needle + 1:]
        self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
        if cur >= 0:
            # poison the stale in-text entry so findlines/_compact skip it
            self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]

    def __setitem__(self, key, value):
        """Insert or replace entry 'key' with value == (node, flags)."""
        if not isinstance(key, bytes):
            raise TypeError("setitem: manifest keys must be a byte string.")
        if not isinstance(value, tuple) or len(value) != 2:
            raise TypeError("Manifest values must be a tuple of (node, flags).")
        hashval = value[0]
        if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
            raise TypeError("node must be a 20-byte byte string")
        flags = value[1]
        if len(hashval) == 22:
            hashval = hashval[:-1]
        if not isinstance(flags, bytes) or len(flags) > 1:
            # fix: actually interpolate the offending value into the message
            # (previously the unformatted template and 'flags' were passed as
            # two separate exception arguments)
            raise TypeError("flags must a 0 or 1 byte string, got %r" % flags)
        needle, found = self.bsearch2(key)
        if found:
            # put the item
            pos = self.positions[needle]
            if pos < 0:
                self.extradata[-pos - 1] = (key, hashval, value[1])
            else:
                # just don't bother
                self.extradata.append((key, hashval, value[1]))
                self.positions[needle] = -len(self.extradata)
        else:
            # not found, put it in with extra positions
            self.extradata.append((key, hashval, value[1]))
            self.positions = (self.positions[:needle] + [-len(self.extradata)]
                              + self.positions[needle:])
            self.extrainfo = (self.extrainfo[:needle] + [0] +
                              self.extrainfo[needle:])

    def copy(self):
        # XXX call _compact like in C?
        return _lazymanifest(self.data, self.positions, self.extrainfo,
                             self.extradata)

    def _compact(self):
        """Fold all extradata entries back into one flat text string."""
        # hopefully not called TOO often
        if len(self.extradata) == 0:
            return
        l = []
        i = 0
        offset = 0
        self.extrainfo = [0] * len(self.positions)
        while i < len(self.positions):
            if self.positions[i] >= 0:
                # run of entries already in 'data': copy the span verbatim
                cur = self.positions[i]
                last_cut = cur
                while True:
                    self.positions[i] = offset
                    i += 1
                    if i == len(self.positions) or self.positions[i] < 0:
                        break
                    offset += self.positions[i] - cur
                    cur = self.positions[i]
                end_cut = self.data.find('\n', cur)
                if end_cut != -1:
                    end_cut += 1
                offset += end_cut - cur
                l.append(self.data[last_cut:end_cut])
            else:
                # run of pending entries: serialize each tuple
                while i < len(self.positions) and self.positions[i] < 0:
                    cur = self.positions[i]
                    t = self.extradata[-cur - 1]
                    l.append(self._pack(t))
                    self.positions[i] = offset
                    # preserve a 21st node byte, if any, in extrainfo
                    if len(t[1]) > 20:
                        self.extrainfo[i] = ord(t[1][21])
                    offset += len(l[-1])
                    i += 1
        self.data = ''.join(l)
        self.extradata = []

    def _pack(self, d):
        """Serialize one (key, node, flags) tuple into a manifest line."""
        return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'

    def text(self):
        self._compact()
        return self.data

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.'''
        # XXX think whether efficiency matters here
        diff = {}

        for fn, e1, flags in self.iterentries():
            if fn not in m2:
                diff[fn] = (e1, flags), (None, '')
            else:
                e2 = m2[fn]
                if (e1, flags) != e2:
                    diff[fn] = (e1, flags), e2
                elif clean:
                    diff[fn] = None

        for fn, e2, flags in m2.iterentries():
            if fn not in self:
                diff[fn] = (None, ''), (e2, flags)

        return diff

    def iterentries(self):
        return lazymanifestiterentries(self)

    def iterkeys(self):
        return lazymanifestiter(self)

    def __iter__(self):
        return lazymanifestiter(self)

    def __len__(self):
        return len(self.positions)

    def filtercopy(self, filterfn):
        """Return a new _lazymanifest keeping only names filterfn accepts."""
        # XXX should be optimized
        c = _lazymanifest('')
        for f, n, fl in self.iterentries():
            if filterfn(f):
                c[f] = n, fl
        return c
365 365
# Prefer the C implementation of lazymanifest when the parsers extension
# module provides one; the pure-Python class above remains the fallback
# (e.g. for the --pure build, where the attribute is absent).
try:
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass
370 370
@interfaceutil.implementer(repository.imanifestdict)
class manifestdict(object):
    """Dict-like view of a flat manifest.

    Thin wrapper around a lazymanifest (C or pure-Python _lazymanifest),
    mapping file path -> node, with per-file flags kept alongside.
    """

    def __init__(self, data=''):
        self._lm = _lazymanifest(data)

    def __getitem__(self, key):
        # [0] is the node; flags are dropped (use find() for both)
        return self._lm[key][0]

    def find(self, key):
        # returns (node, flags); raises KeyError if absent
        return self._lm[key]

    def __len__(self):
        return len(self._lm)

    def __nonzero__(self):
        # nonzero is covered by the __len__ function, but implementing it here
        # makes it easier for extensions to override.
        return len(self._lm) != 0

    __bool__ = __nonzero__

    def __setitem__(self, key, node):
        # preserve any existing flags for the path
        self._lm[key] = node, self.flags(key, '')

    def __contains__(self, key):
        if key is None:
            return False
        return key in self._lm

    def __delitem__(self, key):
        del self._lm[key]

    def __iter__(self):
        return self._lm.__iter__()

    def iterkeys(self):
        return self._lm.iterkeys()

    def keys(self):
        return list(self.iterkeys())

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match:
            # narrow both sides first, then compare the narrowed views
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.filesnotin(m2)
        diff = self.diff(m2)
        # hashflags[1][0] is the node on the m2 side; None means "absent"
        files = set(filepath
                    for filepath, hashflags in diff.iteritems()
                    if hashflags[1][0] is None)
        return files

    @propertycache
    def _dirs(self):
        # cached util.dirs over all file paths in the manifest
        return util.dirs(self)

    def dirs(self):
        return self._dirs

    def hasdir(self, dir):
        return dir in self._dirs

    def _filesfastpath(self, match):
        '''Checks whether we can correctly and quickly iterate over matcher
        files instead of over manifest files.'''
        files = match.files()
        return (len(files) < 100 and (match.isexact() or
                (match.prefix() and all(fn in self for fn in files))))

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # anything left in fset was never seen; report files (but not
        # directories) as bad
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            # cheap path: copy over just the explicitly named files
            m = manifestdict()
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        m = manifestdict()
        m._lm = self._lm.filtercopy(match)
        return m

    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match:
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        return self._lm.diff(m2._lm, clean)

    def setflag(self, key, flag):
        # rewrite the entry, keeping the node and replacing the flags
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key, default=''):
        try:
            return self._lm[key][1]
        except KeyError:
            return default

    def copy(self):
        c = manifestdict()
        c._lm = self._lm.copy()
        return c

    def items(self):
        # iterentries yields (path, node, flags); trim to (path, node)
        return (x[:2] for x in self._lm.iterentries())

    def iteritems(self):
        return (x[:2] for x in self._lm.iterentries())

    def iterentries(self):
        return self._lm.iterentries()

    def text(self):
        # most likely uses native version
        return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as a bytearray and a list of changes
        relative to that text, compute a delta that can be used by revlog.
        """
        delta = []
        dstart = None
        dend = None
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < 1000:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = "%s\0%s%s\n" % (f, hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                                _("failed to remove %s from manifest") % f)
                    l = ""
                # coalesce adjacent/overlapping edits into one delta chunk
                if dstart is not None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart is not None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = bytearray(self.text())
            deltatext = mdiff.textdiff(
                util.buffer(base), util.buffer(arraytext))

        return arraytext, deltatext
600 600
def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found m[start:end] are the line containing
    that string.  If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer, a memoryview or a byte string.
    s is a byte string'''
    def advance(i, c):
        # move i forward to the next occurrence of byte c (or end of m)
        while i < lenm and m[i:i + 1] != c:
            i += 1
        return i
    if not s:
        return (lo, lo)
    lenm = len(m)
    # NOTE: 'not hi' also treats an explicit hi=0 as "unset"
    if not hi:
        hi = lenm
    while lo < hi:
        mid = (lo + hi) // 2
        start = mid
        # back up to the beginning of the line containing mid
        while start > 0 and m[start - 1:start] != '\n':
            start -= 1
        end = advance(start, '\0')
        if bytes(m[start:end]) < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, '\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    end = advance(lo, '\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, '\n')
        return (lo, end + 1)
    else:
        return (lo, lo)
640 640
641 641 def _checkforbidden(l):
642 642 """Check filenames for illegal characters."""
643 643 for f in l:
644 644 if '\n' in f or '\r' in f:
645 645 raise error.StorageError(
646 646 _("'\\n' and '\\r' disallowed in filenames: %r")
647 647 % pycompat.bytestr(f))
648 648
649 649
650 650 # apply the changes collected during the bisect loop to our addlist
651 651 # return a delta suitable for addrevision
652 652 def _addlistdelta(addlist, x):
653 653 # for large addlist arrays, building a new array is cheaper
654 654 # than repeatedly modifying the existing one
655 655 currentposition = 0
656 656 newaddlist = bytearray()
657 657
658 658 for start, end, content in x:
659 659 newaddlist += addlist[currentposition:start]
660 660 if content:
661 661 newaddlist += bytearray(content)
662 662
663 663 currentposition = end
664 664
665 665 newaddlist += addlist[currentposition:]
666 666
667 667 deltatext = "".join(struct.pack(">lll", start, end, len(content))
668 668 + content for start, end, content in x)
669 669 return deltatext, newaddlist
670 670
671 671 def _splittopdir(f):
672 672 if '/' in f:
673 673 dir, subpath = f.split('/', 1)
674 674 return dir + '/', subpath
675 675 else:
676 676 return '', f
677 677
_noop = lambda s: None  # shared "nothing pending" load/copy hook sentinel
679 679
680 680 class treemanifest(object):
    def __init__(self, dir='', text=''):
        # full path of this directory with trailing '/'; '' for the root
        self._dir = dir
        self._node = nullid
        # deferred-work hooks; _noop is the "nothing pending" sentinel
        self._loadfunc = _noop
        self._copyfunc = _noop
        self._dirty = False
        # loaded subdirectories: 'name/' -> treemanifest
        self._dirs = {}
        # unloaded subdirectories: 'name/' -> (path, node, readsubtree, docopy)
        self._lazydirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        if text:
            def readsubtree(subdir, subm):
                raise AssertionError('treemanifest constructor only accepts '
                                     'flat manifests')
            self.parse(text, readsubtree)
            self._dirty = True # Mark flat manifest dirty after parsing
698 698
    def _subpath(self, path):
        # prefix a name from this directory with our full path
        return self._dir + path
701 701
    def _loadalllazy(self):
        """Load every pending lazy subdirectory into self._dirs."""
        selfdirs = self._dirs
        for d, (path, node, readsubtree, docopy) in self._lazydirs.iteritems():
            if docopy:
                # this tree shares the subtree with another; copy on load
                selfdirs[d] = readsubtree(path, node).copy()
            else:
                selfdirs[d] = readsubtree(path, node)
        self._lazydirs = {}
710 710
    def _loadlazy(self, d):
        """Load the single lazy subdirectory d ('name/'), if pending."""
        v = self._lazydirs.get(d)
        if v:
            path, node, readsubtree, docopy = v
            if docopy:
                # shared with another tree; copy on load
                self._dirs[d] = readsubtree(path, node).copy()
            else:
                self._dirs[d] = readsubtree(path, node)
            del self._lazydirs[d]
720 720
    def _loadchildrensetlazy(self, visit):
        """Load the lazy subdirectories named by a visitchildrenset result.

        Returns the set of child names still useful for filtering, or None
        when the caller should not filter by child name ('all'/'this' load
        everything; an empty visit loads nothing).
        """
        if not visit:
            return None
        if visit == 'all' or visit == 'this':
            self._loadalllazy()
            return None

        loadlazy = self._loadlazy
        for k in visit:
            # visit holds bare names; lazydir keys carry a trailing '/'
            loadlazy(k + '/')
        return visit
732 732
    def _loaddifflazy(self, t1, t2):
        """load items in t1 and t2 if they're needed for diffing.

        The criteria currently is:
        - if it's not present in _lazydirs in either t1 or t2, load it in the
          other (it may already be loaded or it may not exist, doesn't matter)
        - if it's present in _lazydirs in both, compare the nodeid; if it
          differs, load it in both
        """
        toloadlazy = []
        for d, v1 in t1._lazydirs.iteritems():
            v2 = t2._lazydirs.get(d)
            # v[1] is the recorded node of the lazy subtree
            if not v2 or v2[1] != v1[1]:
                toloadlazy.append(d)
        for d, v1 in t2._lazydirs.iteritems():
            if d not in t1._lazydirs:
                toloadlazy.append(d)

        for d in toloadlazy:
            # _loadlazy is a no-op for names not pending in that tree
            t1._loadlazy(d)
            t2._loadlazy(d)
754 754
755 755 def __len__(self):
756 756 self._load()
757 757 size = len(self._files)
758 758 self._loadalllazy()
759 759 for m in self._dirs.values():
760 760 size += m.__len__()
761 761 return size
762 762
    def __nonzero__(self):
        # Faster than "__len() != 0" since it avoids loading sub-manifests
        return not self._isempty()

    # py3 truthiness hook aliases the py2 one
    __bool__ = __nonzero__
768 768
    def _isempty(self):
        """True if no file exists anywhere in this tree."""
        self._load() # for consistency; already loaded by all callers
        # See if we can skip loading everything.
        if self._files or (self._dirs and
                           any(not m._isempty() for m in self._dirs.values())):
            return False
        # only now pay the cost of materializing lazy subtrees
        self._loadalllazy()
        return (not self._dirs or
                all(m._isempty() for m in self._dirs.values()))
778 778
    def __repr__(self):
        # loaded=True means no deferred load hook remains pending
        return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
                (self._dir, hex(self._node),
                 bool(self._loadfunc is _noop),
                 self._dirty, id(self)))
784 784
    def dir(self):
        '''The directory that this tree manifest represents, including a
        trailing '/'. Empty string for the repo root directory.'''
        return self._dir
789 789
    def node(self):
        '''This node of this instance. nullid for unsaved instances. Should
        be updated when the instance is read or written from a revlog.
        '''
        # a dirty tree has no valid node until it is (re)written
        assert not self._dirty
        return self._node
796 796
    def setnode(self, node):
        """Record the revlog node for this tree and mark it clean."""
        self._node = node
        self._dirty = False
800 800
    def iterentries(self):
        """Yield (path, node, flags) for every file, in sorted path order."""
        self._load()
        self._loadalllazy()
        for p, n in sorted(itertools.chain(self._dirs.items(),
                                           self._files.items())):
            if p in self._files:
                yield self._subpath(p), n, self._flags.get(p, '')
            else:
                # n is a sub-treemanifest; recurse
                for x in n.iterentries():
                    yield x
811 811
    def items(self):
        """Yield (path, node) for every file, in sorted path order."""
        self._load()
        self._loadalllazy()
        for p, n in sorted(itertools.chain(self._dirs.items(),
                                           self._files.items())):
            if p in self._files:
                yield self._subpath(p), n
            else:
                # n is a sub-treemanifest; recurse
                for f, sn in n.iteritems():
                    yield f, sn

    iteritems = items
824 824
    def iterkeys(self):
        """Yield every full file path, in sorted order."""
        self._load()
        self._loadalllazy()
        for p in sorted(itertools.chain(self._dirs, self._files)):
            if p in self._files:
                yield self._subpath(p)
            else:
                # subdirectory: recurse via its own iterator
                for f in self._dirs[p]:
                    yield f
834 834
    def keys(self):
        # materialized list of all full file paths
        return list(self.iterkeys())
837 837
    def __iter__(self):
        # iterating a treemanifest yields full file paths
        return self.iterkeys()
840 840
841 841 def __contains__(self, f):
842 842 if f is None:
843 843 return False
844 844 self._load()
845 845 dir, subpath = _splittopdir(f)
846 846 if dir:
847 847 self._loadlazy(dir)
848 848
849 849 if dir not in self._dirs:
850 850 return False
851 851
852 852 return self._dirs[dir].__contains__(subpath)
853 853 else:
854 854 return f in self._files
855 855
856 856 def get(self, f, default=None):
857 857 self._load()
858 858 dir, subpath = _splittopdir(f)
859 859 if dir:
860 860 self._loadlazy(dir)
861 861
862 862 if dir not in self._dirs:
863 863 return default
864 864 return self._dirs[dir].get(subpath, default)
865 865 else:
866 866 return self._files.get(f, default)
867 867
    def __getitem__(self, f):
        """Return the node for file path f; raises KeyError when absent."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            return self._dirs[dir].__getitem__(subpath)
        else:
            return self._files[f]
877 877
878 878 def flags(self, f):
879 879 self._load()
880 880 dir, subpath = _splittopdir(f)
881 881 if dir:
882 882 self._loadlazy(dir)
883 883
884 884 if dir not in self._dirs:
885 885 return ''
886 886 return self._dirs[dir].flags(subpath)
887 887 else:
888 888 if f in self._lazydirs or f in self._dirs:
889 889 return ''
890 890 return self._flags.get(f, '')
891 891
    def find(self, f):
        """Return (node, flags) for file path f; raises KeyError when absent."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            return self._dirs[dir].find(subpath)
        else:
            return self._files[f], self._flags.get(f, '')
901 901
    def __delitem__(self, f):
        """Remove file path f, pruning any subdirectory it empties."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            self._dirs[dir].__delitem__(subpath)
            # If the directory is now empty, remove it
            if self._dirs[dir]._isempty():
                del self._dirs[dir]
        else:
            del self._files[f]
            if f in self._flags:
                del self._flags[f]
        self._dirty = True
917 917
    def __setitem__(self, f, n):
        """Set the node for file path f, creating subtrees as needed."""
        assert n is not None
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].__setitem__(subpath, n)
        else:
            self._files[f] = n[:21] # to match manifestdict's behavior
        self._dirty = True
930 930
    def _load(self):
        # Run (and clear) the pending load or copy hook; at most one of
        # them is set at a time, and clearing before the call prevents
        # re-entrant loading.
        if self._loadfunc is not _noop:
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)
938 938
    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].setflag(subpath, flags)
        else:
            self._flags[f] = flags
        # flag changes make this tree (and its ancestors' view) dirty
        self._dirty = True
951 951
    def copy(self):
        """Return a lazy copy; subtree contents are copied on first access."""
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:
            def _copyfunc(s):
                self._load()
                # re-share lazy subtrees, forcing copy-on-load (docopy=True)
                s._lazydirs = {d: (p, n, r, True) for
                               d, (p, n, r, c) in self._lazydirs.iteritems()}
                sdirs = s._dirs
                for d, v in self._dirs.iteritems():
                    sdirs[d] = v.copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)
            if self._loadfunc is _noop:
                # already loaded: copy eagerly
                _copyfunc(copy)
            else:
                # not loaded yet: defer the copy until first access
                copy._copyfunc = _copyfunc
        else:
            copy._copyfunc = self._copyfunc
        return copy
973 973
    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match and not match.always():
            # narrow both sides, then compare the narrowed views
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.filesnotin(m2)

        files = set()
        def _filesnotin(t1, t2):
            # identical, clean subtrees cannot differ; skip them wholesale
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)
            for d, m1 in t1._dirs.iteritems():
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    # whole subtree missing on the other side
                    files.update(m1.iterkeys())

            for fn in t1._files:
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files
1001 1001
    @propertycache
    def _alldirs(self):
        # cached util.dirs over all file paths in this tree
        return util.dirs(self)
1005 1005
    def dirs(self):
        # all directory prefixes present in this manifest
        return self._alldirs
1008 1008
1009 1009 def hasdir(self, dir):
1010 1010 self._load()
1011 1011 topdir, subdir = _splittopdir(dir)
1012 1012 if topdir:
1013 1013 self._loadlazy(topdir)
1014 1014 if topdir in self._dirs:
1015 1015 return self._dirs[topdir].hasdir(subdir)
1016 1016 return False
1017 1017 dirslash = dir + '/'
1018 1018 return dirslash in self._dirs or dirslash in self._lazydirs
1019 1019
    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        for fn in self._walk(match):
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # anything left in fset was never yielded; report files (but not
        # directories) as bad
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)
1048 1048
    def _walk(self, match):
        '''Recursively generates matching file names for walk().'''
        # `visit` is either falsy (prune), 'this'/'all', or a set of child
        # names to descend into — see the discussion in _matches()
        visit = match.visitchildrenset(self._dir[:-1] or '.')
        if not visit:
            return

        # yield this dir's files and walk its submanifests
        self._load()
        visit = self._loadchildrensetlazy(visit)
        for p in sorted(list(self._dirs) + list(self._files)):
            if p in self._files:
                fullp = self._subpath(p)
                if match(fullp):
                    yield fullp
            else:
                # p ends with '/' for directories; strip it for the set test
                if not visit or p[:-1] in visit:
                    for f in self._dirs[p]._walk(match):
                        yield f
1067 1067
1068 1068 def matches(self, match):
1069 1069 '''generate a new manifest filtered by the match argument'''
1070 1070 if match.always():
1071 1071 return self.copy()
1072 1072
1073 1073 return self._matches(match)
1074 1074
    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''

        visit = match.visitchildrenset(self._dir[:-1] or '.')
        if visit == 'all':
            # everything below this directory matches: copy wholesale
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            # nothing below this directory can match
            return ret

        self._load()
        for fn in self._files:
            # While visitchildrenset *usually* lists only subdirs, this is
            # actually up to the matcher and may have some files in the set().
            # If visit == 'this', we should obviously look at the files in this
            # directory; if visit is a set, and fn is in it, we should inspect
            # fn (but no need to inspect things not in the set).
            if visit != 'this' and fn not in visit:
                continue
            fullp = self._subpath(fn)
            # visitchildrenset isn't perfect, we still need to call the regular
            # matcher code to further filter results.
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        visit = self._loadchildrensetlazy(visit)
        for dir, subm in self._dirs.iteritems():
            if visit and dir[:-1] not in visit:
                continue
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            # the filtered result diverges from any stored node
            ret._dirty = True
        return ret
1115 1115
    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match and not match.always():
            # restrict both sides to the matched files, then diff those
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        result = {}
        emptytree = treemanifest()

        def _iterativediff(t1, t2, stack):
            """compares two tree manifests and append new tree-manifests which
            needs to be compared to stack"""
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                # same stored node, no pending edits: subtrees are identical
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)

            # schedule subdirectory comparisons (emptytree stands in for a
            # directory missing on one side)
            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                stack.append((m1, m2))

            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    stack.append((emptytree, m2))

            # compare files directly at this level
            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        # iterative (explicit-stack) traversal instead of recursion
        stackls = []
        _iterativediff(self, m2, stackls)
        while stackls:
            t1, t2 = stackls.pop()
            # stackls is populated in the function call
            _iterativediff(t1, t2, stackls)
        return result
1176 1176
1177 1177 def unmodifiedsince(self, m2):
1178 1178 return not self._dirty and not m2._dirty and self._node == m2._node
1179 1179
    def parse(self, text, readsubtree):
        # Populate this tree from serialized manifest `text`. Subdirectory
        # entries (flag 't') are recorded lazily and resolved via
        # `readsubtree` on first access.
        selflazy = self._lazydirs
        subpath = self._subpath
        for f, n, fl in _parse(text):
            if fl == 't':
                f = f + '/'
                # False below means "doesn't need to be copied" and can use the
                # cached value from readsubtree directly.
                selflazy[f] = (subpath(f), n, readsubtree, False)
            elif '/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl
1203 1203
    def text(self):
        """Get the full data of this manifest as a bytestring."""
        # force a full load so iterentries() sees every entry
        self._load()
        return _text(self.iterentries())
1208 1208
1209 1209 def dirtext(self):
1210 1210 """Get the full data of this directory as a bytestring. Make sure that
1211 1211 any submanifests have been written first, so their nodeids are correct.
1212 1212 """
1213 1213 self._load()
1214 1214 flags = self.flags
1215 1215 lazydirs = [(d[:-1], v[1], 't') for d, v in self._lazydirs.iteritems()]
1216 1216 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
1217 1217 files = [(f, self._files[f], flags(f)) for f in self._files]
1218 1218 return _text(sorted(dirs + files + lazydirs))
1219 1219
    def read(self, gettext, readsubtree):
        # Defer parsing: record a loader so the tree is only materialized
        # on first access (via _load()).
        def _load_for_read(s):
            s.parse(gettext(), readsubtree)
            # freshly parsed content matches storage, so it is not dirty
            s._dirty = False
        self._loadfunc = _load_for_read
1225 1225
    def writesubtrees(self, m1, m2, writesubtree, match):
        """Invoke `writesubtree` for each subdirectory of this manifest,
        passing the corresponding parent subtree nodes from m1/m2."""
        self._load() # for consistency; should never have any effect here
        m1._load()
        m2._load()
        emptytree = treemanifest()
        def getnode(m, d):
            # prefer the node recorded in a still-lazy entry; fall back to
            # the loaded subtree (emptytree supplies a null node)
            ld = m._lazydirs.get(d)
            if ld:
                return ld[1]
            return m._dirs.get(d, emptytree)._node

        # let's skip investigating things that `match` says we do not need.
        visit = match.visitchildrenset(self._dir[:-1] or '.')
        visit = self._loadchildrensetlazy(visit)
        if visit == 'this' or visit == 'all':
            visit = None
        for d, subm in self._dirs.iteritems():
            if visit and d[:-1] not in visit:
                continue
            subp1 = getnode(m1, d)
            subp2 = getnode(m2, d)
            if subp1 == nullid:
                # keep a real node (if any) in the first-parent slot
                subp1, subp2 = subp2, subp1
            writesubtree(subm, subp1, subp2, match)
1250 1250
    def walksubtrees(self, matcher=None):
        """Returns an iterator of the subtrees of this manifest, including this
        manifest itself.

        If `matcher` is provided, it only returns subtrees that match.
        """
        if matcher and not matcher.visitdir(self._dir[:-1] or '.'):
            # matcher prunes this whole subtree
            return
        if not matcher or matcher(self._dir[:-1]):
            yield self

        self._load()
        # OPT: use visitchildrenset to avoid loading everything.
        self._loadalllazy()
        for d, subm in self._dirs.iteritems():
            for subtree in subm.walksubtrees(matcher=matcher):
                yield subtree
1268 1268
class manifestfulltextcache(util.lrucachedict):
    """File-backed LRU cache for the manifest cache

    File consists of entries, up to EOF:

    - 20 bytes node, 4 bytes length, <length> manifest data

    These are written in reverse cache order (oldest to newest).

    """

    # file name inside the repository's cache vfs
    _file = 'manifestfulltextcache'

    def __init__(self, max):
        super(manifestfulltextcache, self).__init__(max)
        # True when the in-memory state diverges from the on-disk file
        self._dirty = False
        # True once the on-disk file has been loaded
        self._read = False
        # vfs used to read/write the cache file; assigned externally
        self._opener = None

    def read(self):
        # Load the on-disk cache at most once; no-op without an opener.
        if self._read or self._opener is None:
            return

        try:
            with self._opener(self._file) as fp:
                # NOTE: binds (and shadows) the builtin name `set` to the
                # parent-class setter for brevity in the loop below
                set = super(manifestfulltextcache, self).__setitem__
                # ignore trailing data, this is a cache, corruption is skipped
                while True:
                    node = fp.read(20)
                    if len(node) < 20:
                        break
                    try:
                        size = struct.unpack('>L', fp.read(4))[0]
                    except struct.error:
                        break
                    value = bytearray(fp.read(size))
                    if len(value) != size:
                        break
                    set(node, value)
        except IOError:
            # the file is allowed to be missing
            pass

        self._read = True
        self._dirty = False

    def write(self):
        # Persist the cache when it has unsaved changes and an opener.
        if not self._dirty or self._opener is None:
            return
        # rotate backwards to the first used node
        with self._opener(self._file, 'w', atomictemp=True, checkambig=True
                          ) as fp:
            node = self._head.prev
            while True:
                if node.key in self._cache:
                    fp.write(node.key)
                    fp.write(struct.pack('>L', len(node.value)))
                    fp.write(node.value)
                if node is self._head:
                    break
                node = node.prev
        # NOTE(review): _dirty is not reset to False after a successful
        # write, so a later write() call rewrites the file — confirm whether
        # this is intentional.

    def __len__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__len__()

    def __contains__(self, k):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__contains__(k)

    def __iter__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__iter__()

    def __getitem__(self, k):
        if not self._read:
            self.read()
        # the cache lru order can change on read
        setdirty = self._cache.get(k) is not self._head
        value = super(manifestfulltextcache, self).__getitem__(k)
        if setdirty:
            self._dirty = True
        return value

    def __setitem__(self, k, v):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__setitem__(k, v)
        self._dirty = True

    def __delitem__(self, k):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__delitem__(k)
        self._dirty = True

    def get(self, k, default=None):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).get(k, default=default)

    def clear(self, clear_persisted_data=False):
        # Drop in-memory entries; optionally wipe the on-disk file too.
        super(manifestfulltextcache, self).clear()
        if clear_persisted_data:
            self._dirty = True
            self.write()
        self._read = False
1379 1379
@interfaceutil.implementer(repository.imanifeststorage)
class manifestrevlog(object):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''
    def __init__(self, opener, tree='', dirlogcache=None, indexfile=None,
                 treemanifest=False):
        """Constructs a new manifest revlog

        `indexfile` - used by extensions to have two manifests at once, like
        when transitioning between flatmanifeset and treemanifests.

        `treemanifest` - used to indicate this is a tree manifest revlog. Opener
        options can also be used to make this a tree manifest revlog. The opener
        option takes precedence, so if it is set to True, we ignore whatever
        value is passed in to the constructor.
        """
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        optiontreemanifest = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            optiontreemanifest = opts.get('treemanifest', False)

        # the opener option wins over the constructor argument (see docstring)
        self._treeondisk = optiontreemanifest or treemanifest

        self._fulltextcache = manifestfulltextcache(cachesize)

        if tree:
            assert self._treeondisk, 'opts is %r' % opts

        if indexfile is None:
            indexfile = '00manifest.i'
            if tree:
                indexfile = "meta/" + tree + indexfile

        self.tree = tree

        # The dirlogcache is kept on the root manifest log
        if tree:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}

        self._revlog = revlog.revlog(opener, indexfile,
                                     # only root indexfile is cached
                                     checkambig=not bool(tree),
                                     mmaplargeindex=True)

        self.index = self._revlog.index
        self.version = self._revlog.version
        self._generaldelta = self._revlog._generaldelta

    def _setupmanifestcachehooks(self, repo):
        """Persist the manifestfulltextcache on lock release"""
        if not util.safehasattr(repo, '_wlockref'):
            return

        self._fulltextcache._opener = repo.cachevfs
        # only schedule a write-back when the wlock is currently held: the
        # cache file lives under the cache vfs, guarded by the wlock
        if repo._currentlock(repo._wlockref) is None:
            return

        # weak references so the hook does not keep repo/revlog alive
        reporef = weakref.ref(repo)
        manifestrevlogref = weakref.ref(self)

        def persistmanifestcache():
            repo = reporef()
            self = manifestrevlogref()
            if repo is None or self is None:
                return
            if repo.manifestlog.getstorage(b'') is not self:
                # there's a different manifest in play now, abort
                return
            self._fulltextcache.write()

        repo._afterlock(persistmanifestcache)

    @property
    def fulltextcache(self):
        return self._fulltextcache

    def clearcaches(self, clear_persisted_data=False):
        self._revlog.clearcaches()
        self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
        self._dirlogcache = {self.tree: self}

    def dirlog(self, d):
        # Return (and memoize) the manifest revlog for subdirectory `d`.
        if d:
            assert self._treeondisk
        if d not in self._dirlogcache:
            mfrevlog = manifestrevlog(self.opener, d,
                                      self._dirlogcache,
                                      treemanifest=self._treeondisk)
            self._dirlogcache[d] = mfrevlog
        return self._dirlogcache[d]

    def add(self, m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
            work = heapq.merge([(x, False) for x in added],
                               [(x, True) for x in removed])

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self._revlog.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self._revlog.addrevision(text, transaction, link, p1, p2,
                                         cachedelta)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                assert readtree, "readtree must be set for treemanifest writes"
                assert match, "match must be specified for treemanifest writes"
                m1 = readtree(self.tree, p1)
                m2 = readtree(self.tree, p2)
                n = self._addtree(m, transaction, link, m1, m2, readtree,
                                  match=match)
                arraytext = None
            else:
                text = m.text()
                n = self._revlog.addrevision(text, transaction, link, p1, p2)
                arraytext = bytearray(text)

        if arraytext is not None:
            self.fulltextcache[n] = arraytext

        return n

    def _addtree(self, m, transaction, link, m1, m2, readtree, match):
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if self.tree != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(
            m2)):
            return m.node()
        def writesubtree(subm, subp1, subp2, match):
            sublog = self.dirlog(subm.dir())
            sublog.add(subm, transaction, link, subp1, subp2, None, None,
                       readtree=readtree, match=match)
        m.writesubtrees(m1, m2, writesubtree, match)
        text = m.dirtext()
        n = None
        if self.tree != '':
            # Double-check whether contents are unchanged to one parent
            if text == m1.dirtext():
                n = m1.node()
            elif text == m2.dirtext():
                n = m2.node()

        if not n:
            n = self._revlog.addrevision(text, transaction, link, m1.node(),
                                         m2.node())

        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n

    # The methods below are thin delegations to the underlying revlog.

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, value):
        return self._revlog.lookup(value)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def parents(self, node):
        return self._revlog.parents(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def checksize(self):
        return self._revlog.checksize()

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def cmp(self, node, text):
        return self._revlog.cmp(node, text)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def emitrevisions(self, nodes, nodesorder=None,
                      revisiondata=False, assumehaveparentrevisions=False,
                      deltamode=repository.CG_DELTAMODE_STD):
        return self._revlog.emitrevisions(
            nodes, nodesorder=nodesorder, revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)

    def rawsize(self, rev):
        return self._revlog.rawsize(rev)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def files(self):
        return self._revlog.files()

    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, manifestrevlog):
            raise error.ProgrammingError('expected manifestrevlog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)

    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        return self._revlog.storageinfo(
            exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
            revisionscount=revisionscount, trackedsize=trackedsize,
            storedsize=storedsize)

    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    @property
    def opener(self):
        return self._revlog.opener

    @opener.setter
    def opener(self, value):
        self._revlog.opener = value
1640 1640
@interfaceutil.implementer(repository.imanifestlog)
class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""
    def __init__(self, opener, repo, rootstore, narrowmatch):
        usetreemanifest = False
        cachesize = 4

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
            cachesize = opts.get('manifestcachesize', cachesize)

        self._treemanifests = usetreemanifest

        self._rootstore = rootstore
        # arrange for the fulltext cache to be persisted on lock release
        self._rootstore._setupmanifestcachehooks(repo)
        self._narrowmatch = narrowmatch

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}
        self._dirmancache[''] = util.lrucachedict(cachesize)

        self._cachesize = cachesize

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        return self.get('', node)

    def get(self, tree, node, verify=True):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.

        `verify` - if True an exception will be thrown if the node is not in
        the revlog
        """
        if node in self._dirmancache.get(tree, ()):
            return self._dirmancache[tree][node]

        if not self._narrowmatch.always():
            if not self._narrowmatch.visitdir(tree[:-1] or '.'):
                # directory is outside the narrowspec: return a stub ctx
                return excludeddirmanifestctx(tree, node)
        if tree:
            if self._rootstore._treeondisk:
                if verify:
                    # Side-effect is LookupError is raised if node doesn't
                    # exist.
                    self.getstorage(tree).rev(node)

                m = treemanifestctx(self, tree, node)
            else:
                raise error.Abort(
                        _("cannot ask for manifest directory '%s' in a flat "
                          "manifest") % tree)
        else:
            if verify:
                # Side-effect is LookupError is raised if node doesn't exist.
                self._rootstore.rev(node)

            if self._treemanifests:
                m = treemanifestctx(self, '', node)
            else:
                m = manifestctx(self, node)

        if node != nullid:
            # cache every manifest ctx except the null one
            mancache = self._dirmancache.get(tree)
            if not mancache:
                mancache = util.lrucachedict(self._cachesize)
                self._dirmancache[tree] = mancache
            mancache[node] = m
        return m

    def getstorage(self, tree):
        # per-directory revlog storage (the root store for tree == '')
        return self._rootstore.dirlog(tree)

    def clearcaches(self, clear_persisted_data=False):
        self._dirmancache.clear()
        self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)

    def rev(self, node):
        return self._rootstore.rev(node)
1729 1729
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memmanifestctx(object):
    """An in-memory, writable flat manifest revision."""

    def __init__(self, manifestlog):
        self._manifestlog = manifestlog
        self._manifestdict = manifestdict()

    def _storage(self):
        # flat manifests live in the root store
        return self._manifestlog.getstorage(b'')

    def read(self):
        return self._manifestdict

    def new(self):
        return memmanifestctx(self._manifestlog)

    def copy(self):
        other = memmanifestctx(self._manifestlog)
        other._manifestdict = self.read().copy()
        return other

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        store = self._storage()
        return store.add(self._manifestdict, transaction, link, p1, p2,
                         added, removed, match=match)
1753 1753
@interfaceutil.implementer(repository.imanifestrevisionstored)
class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """
    def __init__(self, manifestlog, node):
        self._manifestlog = manifestlog
        # parsed manifestdict, filled lazily by read()
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        #self.p1, self.p2 = store.parents(node)
        #rev = store.rev(node)
        #self.linkrev = store.linkrev(rev)

    def _storage(self):
        # flat manifests live in the root store
        return self._manifestlog.getstorage(b'')

    def node(self):
        return self._node

    def new(self):
        # fresh, empty, writable manifest bound to the same log
        return memmanifestctx(self._manifestlog)

    def copy(self):
        # writable in-memory copy of this revision's contents
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        # (p1, p2) node ids, fetched from storage once and cached
        return self._storage().parents(self._node)

    def read(self):
        # Parse (and cache) the manifest text for this node.
        if self._data is None:
            if self._node == nullid:
                self._data = manifestdict()
            else:
                store = self._storage()
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    # populate the fulltext cache for later readers
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = manifestdict(text)
        return self._data

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, nothing changes since this is a flat manifest.
        '''
        store = self._storage()
        r = store.rev(self._node)
        deltaparent = store.deltaparent(r)
        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
            return self.readdelta()
        return self.read()

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        Changing the value of `shallow` has no effect on flat manifests.
        '''
        store = self._storage()
        r = store.rev(self._node)
        d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
        return manifestdict(d)

    def find(self, key):
        # look up (node, flags) for a single file
        return self.read().find(key)
1833 1833
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memtreemanifestctx(object):
    """An in-memory, writable tree manifest revision."""

    def __init__(self, manifestlog, dir=''):
        self._manifestlog = manifestlog
        self._dir = dir
        self._treemanifest = treemanifest()

    def _storage(self):
        # writes always start from the root tree store
        return self._manifestlog.getstorage(b'')

    def read(self):
        return self._treemanifest

    def new(self, dir=''):
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        other = memtreemanifestctx(self._manifestlog, dir=self._dir)
        other._treemanifest = self._treemanifest.copy()
        return other

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        def readtree(dir, node):
            return self._manifestlog.get(dir, node).read()
        store = self._storage()
        return store.add(self._treemanifest, transaction, link, p1, p2,
                         added, removed, readtree=readtree, match=match)
1861 1861
1862 1862 @interfaceutil.implementer(repository.imanifestrevisionstored)
1863 1863 class treemanifestctx(object):
    def __init__(self, manifestlog, dir, node):
        self._manifestlog = manifestlog
        # directory this manifest covers ('' for the root)
        self._dir = dir
        # parsed tree manifest, filled lazily by read()
        self._data = None

        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
        # we can instantiate treemanifestctx objects for directories we don't
        # have on disk.
        #self.p1, self.p2 = store.parents(node)
        #rev = store.rev(node)
        #self.linkrev = store.linkrev(rev)
1877 1877
1878 1878 def _storage(self):
1879 1879 narrowmatch = self._manifestlog._narrowmatch
1880 1880 if not narrowmatch.always():
1881 1881 if not narrowmatch.visitdir(self._dir[:-1] or '.'):
1882 1882 return excludedmanifestrevlog(self._dir)
1883 1883 return self._manifestlog.getstorage(self._dir)
1884 1884
    def read(self):
        # Parse (and cache) this directory's manifest for self._node.
        if self._data is None:
            store = self._storage()
            if self._node == nullid:
                self._data = treemanifest()
            # TODO accessing non-public API
            elif store._treeondisk:
                m = treemanifest(dir=self._dir)
                def gettext():
                    return store.revision(self._node)
                def readsubtree(dir, subm):
                    # Set verify to False since we need to be able to create
                    # subtrees for trees that don't exist on disk.
                    return self._manifestlog.get(dir, subm, verify=False).read()
                m.read(gettext, readsubtree)
                m.setnode(self._node)
                self._data = m
            else:
                # flat storage: parse the full text into a tree structure
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = treemanifest(dir=self._dir, text=text)

        return self._data
1912 1912
1913 1913 def node(self):
1914 1914 return self._node
1915 1915
1916 1916 def new(self, dir=''):
1917 1917 return memtreemanifestctx(self._manifestlog, dir=dir)
1918 1918
1919 1919 def copy(self):
1920 1920 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
1921 1921 memmf._treemanifest = self.read().copy()
1922 1922 return memmf
1923 1923
1924 1924 @propertycache
1925 1925 def parents(self):
1926 1926 return self._storage().parents(self._node)
1927 1927
1928 1928 def readdelta(self, shallow=False):
1929 1929 '''Returns a manifest containing just the entries that are present
1930 1930 in this manifest, but not in its p1 manifest. This is efficient to read
1931 1931 if the revlog delta is already p1.
1932 1932
1933 1933 If `shallow` is True, this will read the delta for this directory,
1934 1934 without recursively reading subdirectory manifests. Instead, any
1935 1935 subdirectory entry will be reported as it appears in the manifest, i.e.
1936 1936 the subdirectory will be reported among files and distinguished only by
1937 1937 its 't' flag.
1938 1938 '''
1939 1939 store = self._storage()
1940 1940 if shallow:
1941 1941 r = store.rev(self._node)
1942 1942 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
1943 1943 return manifestdict(d)
1944 1944 else:
1945 1945 # Need to perform a slow delta
1946 1946 r0 = store.deltaparent(store.rev(self._node))
1947 1947 m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
1948 1948 m1 = self.read()
1949 1949 md = treemanifest(dir=self._dir)
1950 1950 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1951 1951 if n1:
1952 1952 md[f] = n1
1953 1953 if fl1:
1954 1954 md.setflag(f, fl1)
1955 1955 return md
1956 1956
1957 1957 def readfast(self, shallow=False):
1958 1958 '''Calls either readdelta or read, based on which would be less work.
1959 1959 readdelta is called if the delta is against the p1, and therefore can be
1960 1960 read quickly.
1961 1961
1962 1962 If `shallow` is True, it only returns the entries from this manifest,
1963 1963 and not any submanifests.
1964 1964 '''
1965 1965 store = self._storage()
1966 1966 r = store.rev(self._node)
1967 1967 deltaparent = store.deltaparent(r)
1968 1968 if (deltaparent != nullrev and
1969 1969 deltaparent in store.parentrevs(r)):
1970 1970 return self.readdelta(shallow=shallow)
1971 1971
1972 1972 if shallow:
1973 1973 return manifestdict(store.revision(self._node))
1974 1974 else:
1975 1975 return self.read()
1976 1976
1977 1977 def find(self, key):
1978 1978 return self.read().find(key)
1979 1979
class excludeddir(treemanifest):
    """Stand-in for a directory that is excluded from the repository.

    With narrowing active on a repository that uses treemanifests,
    some of the directory revlogs will be excluded from the resulting
    clone. This is a huge storage win for clients, but means we need
    some sort of pseudo-manifest to surface to internals so we can
    detect a merge conflict outside the narrowspec. That's what this
    class is: it stands in for a directory whose node is known, but
    whose contents are unknown.
    """
    def __init__(self, dir, node):
        super(excludeddir, self).__init__(dir)
        self._node = node
        # Add an empty file, which will be included by iterators and such,
        # appearing as the directory itself (i.e. something like "dir/")
        self._files[''] = node
        self._flags[''] = 't'

    # Manifests outside the narrowspec should never be modified, so avoid
    # copying. This makes a noticeable difference when there are very many
    # directories outside the narrowspec. Also, it makes sense for the copy to
    # be of the same type as the original, which would not happen with the
    # super type's copy().
    def copy(self):
        """Return self unchanged; excluded dirs are treated as immutable."""
        return self
2006 2006
class excludeddirmanifestctx(treemanifestctx):
    """context wrapper for excludeddir - see that docstring for rationale"""
    def __init__(self, dir, node):
        self._node = node
        self._dir = dir

    def read(self):
        # Excluded directories always materialize as pseudo-manifests.
        return excludeddir(self._dir, self._node)

    def write(self, *args):
        # Writing through an excluded directory is a programming error.
        msg = 'attempt to write manifest from excluded dir %s' % self._dir
        raise error.ProgrammingError(msg)
2019 2019
class excludedmanifestrevlog(manifestrevlog):
    """Stand-in for excluded treemanifest revlogs.

    When narrowing is active on a treemanifest repository, we'll have
    references to directories we can't see due to the revlog being
    skipped. This class exists to conform to the manifestrevlog
    interface for those directories and proactively prevent writes to
    outside the narrowspec.
    """

    def __init__(self, dir):
        self._dir = dir

    def _complain(self, verb):
        # Build the error raised for any operation that must never reach
        # an excluded directory's revlog.
        return error.ProgrammingError(
            'attempt to %s excluded dir %s' % (verb, self._dir))

    def __len__(self):
        raise self._complain('get length of')

    def rev(self, node):
        raise self._complain('get rev from')

    def linkrev(self, node):
        raise self._complain('get linkrev from')

    def node(self, rev):
        raise self._complain('get node from')

    def add(self, *args, **kwargs):
        # We should never write entries in dirlogs outside the narrow clone.
        # However, the method still gets called from writesubtree() in
        # _addtree(), so we need to handle it. We should possibly make that
        # avoid calling add() with a clean manifest (_dirty is always False
        # in excludeddir instances).
        pass
@@ -1,1293 +1,1294 b''
1 1 #testcases sshv1 sshv2
2 2
3 3 #if sshv2
4 4 $ cat >> $HGRCPATH << EOF
5 5 > [experimental]
6 6 > sshpeer.advertise-v2 = true
7 7 > sshserver.support-v2 = true
8 8 > EOF
9 9 #endif
10 10
11 11 Prepare repo a:
12 12
13 13 $ hg init a
14 14 $ cd a
15 15 $ echo a > a
16 16 $ hg add a
17 17 $ hg commit -m test
18 18 $ echo first line > b
19 19 $ hg add b
20 20
21 21 Create a non-inlined filelog:
22 22
23 23 $ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
24 24 $ for j in 0 1 2 3 4 5 6 7 8 9; do
25 25 > cat data1 >> b
26 26 > hg commit -m test
27 27 > done
28 28
29 29 List files in store/data (should show a 'b.d'):
30 30
31 31 #if reporevlogstore
32 32 $ for i in .hg/store/data/*; do
33 33 > echo $i
34 34 > done
35 35 .hg/store/data/a.i
36 36 .hg/store/data/b.d
37 37 .hg/store/data/b.i
38 38 #endif
39 39
40 40 Trigger branchcache creation:
41 41
42 42 $ hg branches
43 43 default 10:a7949464abda
44 44 $ ls .hg/cache
45 45 branch2-served
46 46 manifestfulltextcache (reporevlogstore !)
47 47 rbc-names-v1
48 48 rbc-revs-v1
49 49
50 50 Default operation:
51 51
52 52 $ hg clone . ../b
53 53 updating to branch default
54 54 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
55 55 $ cd ../b
56 56
57 57 Ensure branchcache got copied over:
58 58
59 59 $ ls .hg/cache
60 60 branch2-served
61 manifestfulltextcache
61 62 rbc-names-v1
62 63 rbc-revs-v1
63 64
64 65 $ cat a
65 66 a
66 67 $ hg verify
67 68 checking changesets
68 69 checking manifests
69 70 crosschecking files in changesets and manifests
70 71 checking files
71 72 checked 11 changesets with 11 changes to 2 files
72 73
73 74 Invalid dest '' must abort:
74 75
75 76 $ hg clone . ''
76 77 abort: empty destination path is not valid
77 78 [255]
78 79
79 80 No update, with debug option:
80 81
81 82 #if hardlink
82 83 $ hg --debug clone -U . ../c --config progress.debug=true
83 84 linking: 1 files
84 85 linking: 2 files
85 86 linking: 3 files
86 87 linking: 4 files
87 88 linking: 5 files
88 89 linking: 6 files
89 90 linking: 7 files
90 91 linking: 8 files
91 92 linked 8 files (reporevlogstore !)
92 93 linking: 9 files (reposimplestore !)
93 94 linking: 10 files (reposimplestore !)
94 95 linking: 11 files (reposimplestore !)
95 96 linking: 12 files (reposimplestore !)
96 97 linking: 13 files (reposimplestore !)
97 98 linking: 14 files (reposimplestore !)
98 99 linking: 15 files (reposimplestore !)
99 100 linking: 16 files (reposimplestore !)
100 101 linking: 17 files (reposimplestore !)
101 102 linking: 18 files (reposimplestore !)
102 103 linked 18 files (reposimplestore !)
103 104 #else
104 105 $ hg --debug clone -U . ../c --config progress.debug=true
105 106 linking: 1 files
106 107 copying: 2 files
107 108 copying: 3 files
108 109 copying: 4 files
109 110 copying: 5 files
110 111 copying: 6 files
111 112 copying: 7 files
112 113 copying: 8 files
113 114 copied 8 files (reporevlogstore !)
114 115 copying: 9 files (reposimplestore !)
115 116 copying: 10 files (reposimplestore !)
116 117 copying: 11 files (reposimplestore !)
117 118 copying: 12 files (reposimplestore !)
118 119 copying: 13 files (reposimplestore !)
119 120 copying: 14 files (reposimplestore !)
120 121 copying: 15 files (reposimplestore !)
121 122 copying: 16 files (reposimplestore !)
122 123 copying: 17 files (reposimplestore !)
123 124 copying: 18 files (reposimplestore !)
124 125 copied 18 files (reposimplestore !)
125 126 #endif
126 127 $ cd ../c
127 128
128 129 Ensure branchcache got copied over:
129 130
130 131 $ ls .hg/cache
131 132 branch2-served
132 133 rbc-names-v1
133 134 rbc-revs-v1
134 135
135 136 $ cat a 2>/dev/null || echo "a not present"
136 137 a not present
137 138 $ hg verify
138 139 checking changesets
139 140 checking manifests
140 141 crosschecking files in changesets and manifests
141 142 checking files
142 143 checked 11 changesets with 11 changes to 2 files
143 144
144 145 Default destination:
145 146
146 147 $ mkdir ../d
147 148 $ cd ../d
148 149 $ hg clone ../a
149 150 destination directory: a
150 151 updating to branch default
151 152 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
152 153 $ cd a
153 154 $ hg cat a
154 155 a
155 156 $ cd ../..
156 157
157 158 Check that we drop the 'file:' from the path before writing the .hgrc:
158 159
159 160 $ hg clone file:a e
160 161 updating to branch default
161 162 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
162 163 $ grep 'file:' e/.hg/hgrc
163 164 [1]
164 165
165 166 Check that path aliases are expanded:
166 167
167 168 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
168 169 $ hg -R f showconfig paths.default
169 170 $TESTTMP/a#0
170 171
171 172 Use --pull:
172 173
173 174 $ hg clone --pull a g
174 175 requesting all changes
175 176 adding changesets
176 177 adding manifests
177 178 adding file changes
178 179 added 11 changesets with 11 changes to 2 files
179 180 new changesets acb14030fe0a:a7949464abda
180 181 updating to branch default
181 182 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
182 183 $ hg -R g verify
183 184 checking changesets
184 185 checking manifests
185 186 crosschecking files in changesets and manifests
186 187 checking files
187 188 checked 11 changesets with 11 changes to 2 files
188 189
189 190 Invalid dest '' with --pull must abort (issue2528):
190 191
191 192 $ hg clone --pull a ''
192 193 abort: empty destination path is not valid
193 194 [255]
194 195
195 196 Clone to '.':
196 197
197 198 $ mkdir h
198 199 $ cd h
199 200 $ hg clone ../a .
200 201 updating to branch default
201 202 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
202 203 $ cd ..
203 204
204 205
205 206 *** Tests for option -u ***
206 207
207 208 Adding some more history to repo a:
208 209
209 210 $ cd a
210 211 $ hg tag ref1
211 212 $ echo the quick brown fox >a
212 213 $ hg ci -m "hacked default"
213 214 $ hg up ref1
214 215 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
215 216 $ hg branch stable
216 217 marked working directory as branch stable
217 218 (branches are permanent and global, did you want a bookmark?)
218 219 $ echo some text >a
219 220 $ hg ci -m "starting branch stable"
220 221 $ hg tag ref2
221 222 $ echo some more text >a
222 223 $ hg ci -m "another change for branch stable"
223 224 $ hg up ref2
224 225 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
225 226 $ hg parents
226 227 changeset: 13:e8ece76546a6
227 228 branch: stable
228 229 tag: ref2
229 230 parent: 10:a7949464abda
230 231 user: test
231 232 date: Thu Jan 01 00:00:00 1970 +0000
232 233 summary: starting branch stable
233 234
234 235
235 236 Repo a has two heads:
236 237
237 238 $ hg heads
238 239 changeset: 15:0aae7cf88f0d
239 240 branch: stable
240 241 tag: tip
241 242 user: test
242 243 date: Thu Jan 01 00:00:00 1970 +0000
243 244 summary: another change for branch stable
244 245
245 246 changeset: 12:f21241060d6a
246 247 user: test
247 248 date: Thu Jan 01 00:00:00 1970 +0000
248 249 summary: hacked default
249 250
250 251
251 252 $ cd ..
252 253
253 254
254 255 Testing --noupdate with --updaterev (must abort):
255 256
256 257 $ hg clone --noupdate --updaterev 1 a ua
257 258 abort: cannot specify both --noupdate and --updaterev
258 259 [255]
259 260
260 261
261 262 Testing clone -u:
262 263
263 264 $ hg clone -u . a ua
264 265 updating to branch stable
265 266 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
266 267
267 268 Repo ua has both heads:
268 269
269 270 $ hg -R ua heads
270 271 changeset: 15:0aae7cf88f0d
271 272 branch: stable
272 273 tag: tip
273 274 user: test
274 275 date: Thu Jan 01 00:00:00 1970 +0000
275 276 summary: another change for branch stable
276 277
277 278 changeset: 12:f21241060d6a
278 279 user: test
279 280 date: Thu Jan 01 00:00:00 1970 +0000
280 281 summary: hacked default
281 282
282 283
283 284 Same revision checked out in repo a and ua:
284 285
285 286 $ hg -R a parents --template "{node|short}\n"
286 287 e8ece76546a6
287 288 $ hg -R ua parents --template "{node|short}\n"
288 289 e8ece76546a6
289 290
290 291 $ rm -r ua
291 292
292 293
293 294 Testing clone --pull -u:
294 295
295 296 $ hg clone --pull -u . a ua
296 297 requesting all changes
297 298 adding changesets
298 299 adding manifests
299 300 adding file changes
300 301 added 16 changesets with 16 changes to 3 files (+1 heads)
301 302 new changesets acb14030fe0a:0aae7cf88f0d
302 303 updating to branch stable
303 304 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
304 305
305 306 Repo ua has both heads:
306 307
307 308 $ hg -R ua heads
308 309 changeset: 15:0aae7cf88f0d
309 310 branch: stable
310 311 tag: tip
311 312 user: test
312 313 date: Thu Jan 01 00:00:00 1970 +0000
313 314 summary: another change for branch stable
314 315
315 316 changeset: 12:f21241060d6a
316 317 user: test
317 318 date: Thu Jan 01 00:00:00 1970 +0000
318 319 summary: hacked default
319 320
320 321
321 322 Same revision checked out in repo a and ua:
322 323
323 324 $ hg -R a parents --template "{node|short}\n"
324 325 e8ece76546a6
325 326 $ hg -R ua parents --template "{node|short}\n"
326 327 e8ece76546a6
327 328
328 329 $ rm -r ua
329 330
330 331
331 332 Testing clone -u <branch>:
332 333
333 334 $ hg clone -u stable a ua
334 335 updating to branch stable
335 336 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
336 337
337 338 Repo ua has both heads:
338 339
339 340 $ hg -R ua heads
340 341 changeset: 15:0aae7cf88f0d
341 342 branch: stable
342 343 tag: tip
343 344 user: test
344 345 date: Thu Jan 01 00:00:00 1970 +0000
345 346 summary: another change for branch stable
346 347
347 348 changeset: 12:f21241060d6a
348 349 user: test
349 350 date: Thu Jan 01 00:00:00 1970 +0000
350 351 summary: hacked default
351 352
352 353
353 354 Branch 'stable' is checked out:
354 355
355 356 $ hg -R ua parents
356 357 changeset: 15:0aae7cf88f0d
357 358 branch: stable
358 359 tag: tip
359 360 user: test
360 361 date: Thu Jan 01 00:00:00 1970 +0000
361 362 summary: another change for branch stable
362 363
363 364
364 365 $ rm -r ua
365 366
366 367
367 368 Testing default checkout:
368 369
369 370 $ hg clone a ua
370 371 updating to branch default
371 372 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
372 373
373 374 Repo ua has both heads:
374 375
375 376 $ hg -R ua heads
376 377 changeset: 15:0aae7cf88f0d
377 378 branch: stable
378 379 tag: tip
379 380 user: test
380 381 date: Thu Jan 01 00:00:00 1970 +0000
381 382 summary: another change for branch stable
382 383
383 384 changeset: 12:f21241060d6a
384 385 user: test
385 386 date: Thu Jan 01 00:00:00 1970 +0000
386 387 summary: hacked default
387 388
388 389
389 390 Branch 'default' is checked out:
390 391
391 392 $ hg -R ua parents
392 393 changeset: 12:f21241060d6a
393 394 user: test
394 395 date: Thu Jan 01 00:00:00 1970 +0000
395 396 summary: hacked default
396 397
397 398 Test clone with a branch named "@" (issue3677)
398 399
399 400 $ hg -R ua branch @
400 401 marked working directory as branch @
401 402 $ hg -R ua commit -m 'created branch @'
402 403 $ hg clone ua atbranch
403 404 updating to branch default
404 405 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
405 406 $ hg -R atbranch heads
406 407 changeset: 16:798b6d97153e
407 408 branch: @
408 409 tag: tip
409 410 parent: 12:f21241060d6a
410 411 user: test
411 412 date: Thu Jan 01 00:00:00 1970 +0000
412 413 summary: created branch @
413 414
414 415 changeset: 15:0aae7cf88f0d
415 416 branch: stable
416 417 user: test
417 418 date: Thu Jan 01 00:00:00 1970 +0000
418 419 summary: another change for branch stable
419 420
420 421 changeset: 12:f21241060d6a
421 422 user: test
422 423 date: Thu Jan 01 00:00:00 1970 +0000
423 424 summary: hacked default
424 425
425 426 $ hg -R atbranch parents
426 427 changeset: 12:f21241060d6a
427 428 user: test
428 429 date: Thu Jan 01 00:00:00 1970 +0000
429 430 summary: hacked default
430 431
431 432
432 433 $ rm -r ua atbranch
433 434
434 435
435 436 Testing #<branch>:
436 437
437 438 $ hg clone -u . a#stable ua
438 439 adding changesets
439 440 adding manifests
440 441 adding file changes
441 442 added 14 changesets with 14 changes to 3 files
442 443 new changesets acb14030fe0a:0aae7cf88f0d
443 444 updating to branch stable
444 445 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
445 446
446 447 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
447 448
448 449 $ hg -R ua heads
449 450 changeset: 13:0aae7cf88f0d
450 451 branch: stable
451 452 tag: tip
452 453 user: test
453 454 date: Thu Jan 01 00:00:00 1970 +0000
454 455 summary: another change for branch stable
455 456
456 457 changeset: 10:a7949464abda
457 458 user: test
458 459 date: Thu Jan 01 00:00:00 1970 +0000
459 460 summary: test
460 461
461 462
462 463 Same revision checked out in repo a and ua:
463 464
464 465 $ hg -R a parents --template "{node|short}\n"
465 466 e8ece76546a6
466 467 $ hg -R ua parents --template "{node|short}\n"
467 468 e8ece76546a6
468 469
469 470 $ rm -r ua
470 471
471 472
472 473 Testing -u -r <branch>:
473 474
474 475 $ hg clone -u . -r stable a ua
475 476 adding changesets
476 477 adding manifests
477 478 adding file changes
478 479 added 14 changesets with 14 changes to 3 files
479 480 new changesets acb14030fe0a:0aae7cf88f0d
480 481 updating to branch stable
481 482 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
482 483
483 484 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
484 485
485 486 $ hg -R ua heads
486 487 changeset: 13:0aae7cf88f0d
487 488 branch: stable
488 489 tag: tip
489 490 user: test
490 491 date: Thu Jan 01 00:00:00 1970 +0000
491 492 summary: another change for branch stable
492 493
493 494 changeset: 10:a7949464abda
494 495 user: test
495 496 date: Thu Jan 01 00:00:00 1970 +0000
496 497 summary: test
497 498
498 499
499 500 Same revision checked out in repo a and ua:
500 501
501 502 $ hg -R a parents --template "{node|short}\n"
502 503 e8ece76546a6
503 504 $ hg -R ua parents --template "{node|short}\n"
504 505 e8ece76546a6
505 506
506 507 $ rm -r ua
507 508
508 509
509 510 Testing -r <branch>:
510 511
511 512 $ hg clone -r stable a ua
512 513 adding changesets
513 514 adding manifests
514 515 adding file changes
515 516 added 14 changesets with 14 changes to 3 files
516 517 new changesets acb14030fe0a:0aae7cf88f0d
517 518 updating to branch stable
518 519 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
519 520
520 521 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
521 522
522 523 $ hg -R ua heads
523 524 changeset: 13:0aae7cf88f0d
524 525 branch: stable
525 526 tag: tip
526 527 user: test
527 528 date: Thu Jan 01 00:00:00 1970 +0000
528 529 summary: another change for branch stable
529 530
530 531 changeset: 10:a7949464abda
531 532 user: test
532 533 date: Thu Jan 01 00:00:00 1970 +0000
533 534 summary: test
534 535
535 536
536 537 Branch 'stable' is checked out:
537 538
538 539 $ hg -R ua parents
539 540 changeset: 13:0aae7cf88f0d
540 541 branch: stable
541 542 tag: tip
542 543 user: test
543 544 date: Thu Jan 01 00:00:00 1970 +0000
544 545 summary: another change for branch stable
545 546
546 547
547 548 $ rm -r ua
548 549
549 550
550 551 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
551 552 iterable in addbranchrevs()
552 553
553 554 $ cat <<EOF > simpleclone.py
554 555 > from mercurial import hg, ui as uimod
555 556 > myui = uimod.ui.load()
556 557 > repo = hg.repository(myui, b'a')
557 558 > hg.clone(myui, {}, repo, dest=b"ua")
558 559 > EOF
559 560
560 561 $ "$PYTHON" simpleclone.py
561 562 updating to branch default
562 563 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
563 564
564 565 $ rm -r ua
565 566
566 567 $ cat <<EOF > branchclone.py
567 568 > from mercurial import extensions, hg, ui as uimod
568 569 > myui = uimod.ui.load()
569 570 > extensions.loadall(myui)
570 571 > extensions.populateui(myui)
571 572 > repo = hg.repository(myui, b'a')
572 573 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable"])
573 574 > EOF
574 575
575 576 $ "$PYTHON" branchclone.py
576 577 adding changesets
577 578 adding manifests
578 579 adding file changes
579 580 added 14 changesets with 14 changes to 3 files
580 581 new changesets acb14030fe0a:0aae7cf88f0d
581 582 updating to branch stable
582 583 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
583 584 $ rm -r ua
584 585
585 586
586 587 Test clone with special '@' bookmark:
587 588 $ cd a
588 589 $ hg bookmark -r a7949464abda @ # branch point of stable from default
589 590 $ hg clone . ../i
590 591 updating to bookmark @
591 592 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
592 593 $ hg id -i ../i
593 594 a7949464abda
594 595 $ rm -r ../i
595 596
596 597 $ hg bookmark -f -r stable @
597 598 $ hg bookmarks
598 599 @ 15:0aae7cf88f0d
599 600 $ hg clone . ../i
600 601 updating to bookmark @ on branch stable
601 602 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
602 603 $ hg id -i ../i
603 604 0aae7cf88f0d
604 605 $ cd "$TESTTMP"
605 606
606 607
607 608 Testing failures:
608 609
609 610 $ mkdir fail
610 611 $ cd fail
611 612
612 613 No local source
613 614
614 615 $ hg clone a b
615 616 abort: repository a not found!
616 617 [255]
617 618
618 619 No remote source
619 620
620 621 #if windows
621 622 $ hg clone http://$LOCALIP:3121/a b
622 623 abort: error: * (glob)
623 624 [255]
624 625 #else
625 626 $ hg clone http://$LOCALIP:3121/a b
626 627 abort: error: *refused* (glob)
627 628 [255]
628 629 #endif
629 630 $ rm -rf b # work around bug with http clone
630 631
631 632
632 633 #if unix-permissions no-root
633 634
634 635 Inaccessible source
635 636
636 637 $ mkdir a
637 638 $ chmod 000 a
638 639 $ hg clone a b
639 640 abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob)
640 641 [255]
641 642
642 643 Inaccessible destination
643 644
644 645 $ hg init b
645 646 $ cd b
646 647 $ hg clone . ../a
647 648 abort: Permission denied: *../a* (glob)
648 649 [255]
649 650 $ cd ..
650 651 $ chmod 700 a
651 652 $ rm -r a b
652 653
653 654 #endif
654 655
655 656
656 657 #if fifo
657 658
658 659 Source of wrong type
659 660
660 661 $ mkfifo a
661 662 $ hg clone a b
662 663 abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
663 664 [255]
664 665 $ rm a
665 666
666 667 #endif
667 668
668 669 Default destination, same directory
669 670
670 671 $ hg init q
671 672 $ hg clone q
672 673 destination directory: q
673 674 abort: destination 'q' is not empty
674 675 [255]
675 676
676 677 destination directory not empty
677 678
678 679 $ mkdir a
679 680 $ echo stuff > a/a
680 681 $ hg clone q a
681 682 abort: destination 'a' is not empty
682 683 [255]
683 684
684 685
685 686 #if unix-permissions no-root
686 687
687 688 leave existing directory in place after clone failure
688 689
689 690 $ hg init c
690 691 $ cd c
691 692 $ echo c > c
692 693 $ hg commit -A -m test
693 694 adding c
694 695 $ chmod -rx .hg/store/data
695 696 $ cd ..
696 697 $ mkdir d
697 698 $ hg clone c d 2> err
698 699 [255]
699 700 $ test -d d
700 701 $ test -d d/.hg
701 702 [1]
702 703
703 704 re-enable perm to allow deletion
704 705
705 706 $ chmod +rx c/.hg/store/data
706 707
707 708 #endif
708 709
709 710 $ cd ..
710 711
711 712 Test clone from the repository in (emulated) revlog format 0 (issue4203):
712 713
713 714 $ mkdir issue4203
714 715 $ mkdir -p src/.hg
715 716 $ echo foo > src/foo
716 717 $ hg -R src add src/foo
717 718 $ hg -R src commit -m '#0'
718 719 $ hg -R src log -q
719 720 0:e1bab28bca43
720 721 $ hg -R src debugrevlog -c | egrep 'format|flags'
721 722 format : 0
722 723 flags : (none)
723 724 $ hg clone -U -q src dst
724 725 $ hg -R dst log -q
725 726 0:e1bab28bca43
726 727
727 728 Create repositories to test auto sharing functionality
728 729
729 730 $ cat >> $HGRCPATH << EOF
730 731 > [extensions]
731 732 > share=
732 733 > EOF
733 734
734 735 $ hg init empty
735 736 $ hg init source1a
736 737 $ cd source1a
737 738 $ echo initial1 > foo
738 739 $ hg -q commit -A -m initial
739 740 $ echo second > foo
740 741 $ hg commit -m second
741 742 $ cd ..
742 743
743 744 $ hg init filteredrev0
744 745 $ cd filteredrev0
745 746 $ cat >> .hg/hgrc << EOF
746 747 > [experimental]
747 748 > evolution.createmarkers=True
748 749 > EOF
749 750 $ echo initial1 > foo
750 751 $ hg -q commit -A -m initial0
751 752 $ hg -q up -r null
752 753 $ echo initial2 > foo
753 754 $ hg -q commit -A -m initial1
754 755 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
755 756 obsoleted 1 changesets
756 757 $ cd ..
757 758
758 759 $ hg -q clone --pull source1a source1b
759 760 $ cd source1a
760 761 $ hg bookmark bookA
761 762 $ echo 1a > foo
762 763 $ hg commit -m 1a
763 764 $ cd ../source1b
764 765 $ hg -q up -r 0
765 766 $ echo head1 > foo
766 767 $ hg commit -m head1
767 768 created new head
768 769 $ hg bookmark head1
769 770 $ hg -q up -r 0
770 771 $ echo head2 > foo
771 772 $ hg commit -m head2
772 773 created new head
773 774 $ hg bookmark head2
774 775 $ hg -q up -r 0
775 776 $ hg branch branch1
776 777 marked working directory as branch branch1
777 778 (branches are permanent and global, did you want a bookmark?)
778 779 $ echo branch1 > foo
779 780 $ hg commit -m branch1
780 781 $ hg -q up -r 0
781 782 $ hg branch branch2
782 783 marked working directory as branch branch2
783 784 $ echo branch2 > foo
784 785 $ hg commit -m branch2
785 786 $ cd ..
786 787 $ hg init source2
787 788 $ cd source2
788 789 $ echo initial2 > foo
789 790 $ hg -q commit -A -m initial2
790 791 $ echo second > foo
791 792 $ hg commit -m second
792 793 $ cd ..
793 794
794 795 Clone with auto share from an empty repo should not result in share
795 796
796 797 $ mkdir share
797 798 $ hg --config share.pool=share clone empty share-empty
798 799 (not using pooled storage: remote appears to be empty)
799 800 updating to branch default
800 801 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
801 802 $ ls share
802 803 $ test -d share-empty/.hg/store
803 804 $ test -f share-empty/.hg/sharedpath
804 805 [1]
805 806
806 807 Clone with auto share from a repo with filtered revision 0 should not result in share
807 808
808 809 $ hg --config share.pool=share clone filteredrev0 share-filtered
809 810 (not using pooled storage: unable to resolve identity of remote)
810 811 requesting all changes
811 812 adding changesets
812 813 adding manifests
813 814 adding file changes
814 815 added 1 changesets with 1 changes to 1 files
815 816 new changesets e082c1832e09
816 817 updating to branch default
817 818 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
818 819
819 820 Clone from repo with content should result in shared store being created
820 821
821 822 $ hg --config share.pool=share clone source1a share-dest1a
822 823 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
823 824 requesting all changes
824 825 adding changesets
825 826 adding manifests
826 827 adding file changes
827 828 added 3 changesets with 3 changes to 1 files
828 829 new changesets b5f04eac9d8f:e5bfe23c0b47
829 830 searching for changes
830 831 no changes found
831 832 adding remote bookmark bookA
832 833 updating working directory
833 834 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
834 835
835 836 The shared repo should have been created
836 837
837 838 $ ls share
838 839 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
839 840
840 841 The destination should point to it
841 842
842 843 $ cat share-dest1a/.hg/sharedpath; echo
843 844 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
844 845
845 846 The destination should have bookmarks
846 847
847 848 $ hg -R share-dest1a bookmarks
848 849 bookA 2:e5bfe23c0b47
849 850
850 851 The default path should be the remote, not the share
851 852
852 853 $ hg -R share-dest1a config paths.default
853 854 $TESTTMP/source1a
854 855
855 856 Clone with existing share dir should result in pull + share
856 857
857 858 $ hg --config share.pool=share clone source1b share-dest1b
858 859 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
859 860 searching for changes
860 861 adding changesets
861 862 adding manifests
862 863 adding file changes
863 864 added 4 changesets with 4 changes to 1 files (+4 heads)
864 865 adding remote bookmark head1
865 866 adding remote bookmark head2
866 867 new changesets 4a8dc1ab4c13:6bacf4683960
867 868 updating working directory
868 869 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
869 870
870 871 $ ls share
871 872 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
872 873
873 874 $ cat share-dest1b/.hg/sharedpath; echo
874 875 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
875 876
876 877 We only get bookmarks from the remote, not everything in the share
877 878
878 879 $ hg -R share-dest1b bookmarks
879 880 head1 3:4a8dc1ab4c13
880 881 head2 4:99f71071f117
881 882
882 883 Default path should be source, not share.
883 884
884 885 $ hg -R share-dest1b config paths.default
885 886 $TESTTMP/source1b
886 887
887 888 Checked out revision should be head of default branch
888 889
889 890 $ hg -R share-dest1b log -r .
890 891 changeset: 4:99f71071f117
891 892 bookmark: head2
892 893 parent: 0:b5f04eac9d8f
893 894 user: test
894 895 date: Thu Jan 01 00:00:00 1970 +0000
895 896 summary: head2
896 897
897 898
898 899 Clone from unrelated repo should result in new share
899 900
900 901 $ hg --config share.pool=share clone source2 share-dest2
901 902 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
902 903 requesting all changes
903 904 adding changesets
904 905 adding manifests
905 906 adding file changes
906 907 added 2 changesets with 2 changes to 1 files
907 908 new changesets 22aeff664783:63cf6c3dba4a
908 909 searching for changes
909 910 no changes found
910 911 updating working directory
911 912 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
912 913
913 914 $ ls share
914 915 22aeff664783fd44c6d9b435618173c118c3448e
915 916 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
916 917
917 918 remote naming mode works as advertised
918 919
919 920 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
920 921 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
921 922 requesting all changes
922 923 adding changesets
923 924 adding manifests
924 925 adding file changes
925 926 added 3 changesets with 3 changes to 1 files
926 927 new changesets b5f04eac9d8f:e5bfe23c0b47
927 928 searching for changes
928 929 no changes found
929 930 adding remote bookmark bookA
930 931 updating working directory
931 932 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
932 933
933 934 $ ls shareremote
934 935 195bb1fcdb595c14a6c13e0269129ed78f6debde
935 936
936 937 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
937 938 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
938 939 requesting all changes
939 940 adding changesets
940 941 adding manifests
941 942 adding file changes
942 943 added 6 changesets with 6 changes to 1 files (+4 heads)
943 944 new changesets b5f04eac9d8f:6bacf4683960
944 945 searching for changes
945 946 no changes found
946 947 adding remote bookmark head1
947 948 adding remote bookmark head2
948 949 updating working directory
949 950 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
950 951
951 952 $ ls shareremote
952 953 195bb1fcdb595c14a6c13e0269129ed78f6debde
953 954 c0d4f83847ca2a873741feb7048a45085fd47c46
954 955
955 956 request to clone a single revision is respected in sharing mode
956 957
957 958 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
958 959 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
959 960 adding changesets
960 961 adding manifests
961 962 adding file changes
962 963 added 2 changesets with 2 changes to 1 files
963 964 new changesets b5f04eac9d8f:4a8dc1ab4c13
964 965 no changes found
965 966 adding remote bookmark head1
966 967 updating working directory
967 968 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
968 969
969 970 $ hg -R share-1arev log -G
970 971 @ changeset: 1:4a8dc1ab4c13
971 972 | bookmark: head1
972 973 | tag: tip
973 974 | user: test
974 975 | date: Thu Jan 01 00:00:00 1970 +0000
975 976 | summary: head1
976 977 |
977 978 o changeset: 0:b5f04eac9d8f
978 979 user: test
979 980 date: Thu Jan 01 00:00:00 1970 +0000
980 981 summary: initial
981 982
982 983
983 984 making another clone should only pull down requested rev
984 985
985 986 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
986 987 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
987 988 searching for changes
988 989 adding changesets
989 990 adding manifests
990 991 adding file changes
991 992 added 1 changesets with 1 changes to 1 files (+1 heads)
992 993 adding remote bookmark head1
993 994 adding remote bookmark head2
994 995 new changesets 99f71071f117
995 996 updating working directory
996 997 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
997 998
998 999 $ hg -R share-1brev log -G
999 1000 @ changeset: 2:99f71071f117
1000 1001 | bookmark: head2
1001 1002 | tag: tip
1002 1003 | parent: 0:b5f04eac9d8f
1003 1004 | user: test
1004 1005 | date: Thu Jan 01 00:00:00 1970 +0000
1005 1006 | summary: head2
1006 1007 |
1007 1008 | o changeset: 1:4a8dc1ab4c13
1008 1009 |/ bookmark: head1
1009 1010 | user: test
1010 1011 | date: Thu Jan 01 00:00:00 1970 +0000
1011 1012 | summary: head1
1012 1013 |
1013 1014 o changeset: 0:b5f04eac9d8f
1014 1015 user: test
1015 1016 date: Thu Jan 01 00:00:00 1970 +0000
1016 1017 summary: initial
1017 1018
1018 1019
1019 1020 Request to clone a single branch is respected in sharing mode
1020 1021
1021 1022 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1022 1023 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1023 1024 adding changesets
1024 1025 adding manifests
1025 1026 adding file changes
1026 1027 added 2 changesets with 2 changes to 1 files
1027 1028 new changesets b5f04eac9d8f:5f92a6c1a1b1
1028 1029 no changes found
1029 1030 updating working directory
1030 1031 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1031 1032
1032 1033 $ hg -R share-1bbranch1 log -G
1033 1034 o changeset: 1:5f92a6c1a1b1
1034 1035 | branch: branch1
1035 1036 | tag: tip
1036 1037 | user: test
1037 1038 | date: Thu Jan 01 00:00:00 1970 +0000
1038 1039 | summary: branch1
1039 1040 |
1040 1041 @ changeset: 0:b5f04eac9d8f
1041 1042 user: test
1042 1043 date: Thu Jan 01 00:00:00 1970 +0000
1043 1044 summary: initial
1044 1045
1045 1046
1046 1047 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1047 1048 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1048 1049 searching for changes
1049 1050 adding changesets
1050 1051 adding manifests
1051 1052 adding file changes
1052 1053 added 1 changesets with 1 changes to 1 files (+1 heads)
1053 1054 new changesets 6bacf4683960
1054 1055 updating working directory
1055 1056 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1056 1057
1057 1058 $ hg -R share-1bbranch2 log -G
1058 1059 o changeset: 2:6bacf4683960
1059 1060 | branch: branch2
1060 1061 | tag: tip
1061 1062 | parent: 0:b5f04eac9d8f
1062 1063 | user: test
1063 1064 | date: Thu Jan 01 00:00:00 1970 +0000
1064 1065 | summary: branch2
1065 1066 |
1066 1067 | o changeset: 1:5f92a6c1a1b1
1067 1068 |/ branch: branch1
1068 1069 | user: test
1069 1070 | date: Thu Jan 01 00:00:00 1970 +0000
1070 1071 | summary: branch1
1071 1072 |
1072 1073 @ changeset: 0:b5f04eac9d8f
1073 1074 user: test
1074 1075 date: Thu Jan 01 00:00:00 1970 +0000
1075 1076 summary: initial
1076 1077
1077 1078
1078 1079 -U is respected in share clone mode
1079 1080
1080 1081 $ hg --config share.pool=share clone -U source1a share-1anowc
1081 1082 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1082 1083 searching for changes
1083 1084 no changes found
1084 1085 adding remote bookmark bookA
1085 1086
1086 1087 $ ls share-1anowc
1087 1088
1088 1089 Test that auto sharing doesn't cause failure of "hg clone local remote"
1089 1090
1090 1091 $ cd $TESTTMP
1091 1092 $ hg -R a id -r 0
1092 1093 acb14030fe0a
1093 1094 $ hg id -R remote -r 0
1094 1095 abort: repository remote not found!
1095 1096 [255]
1096 1097 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1097 1098 $ hg -R remote id -r 0
1098 1099 acb14030fe0a
1099 1100
1100 1101 Cloning into pooled storage doesn't race (issue5104)
1101 1102
1102 1103 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1103 1104 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1104 1105 $ wait
1105 1106
1106 1107 $ hg -R share-destrace1 log -r tip
1107 1108 changeset: 2:e5bfe23c0b47
1108 1109 bookmark: bookA
1109 1110 tag: tip
1110 1111 user: test
1111 1112 date: Thu Jan 01 00:00:00 1970 +0000
1112 1113 summary: 1a
1113 1114
1114 1115
1115 1116 $ hg -R share-destrace2 log -r tip
1116 1117 changeset: 2:e5bfe23c0b47
1117 1118 bookmark: bookA
1118 1119 tag: tip
1119 1120 user: test
1120 1121 date: Thu Jan 01 00:00:00 1970 +0000
1121 1122 summary: 1a
1122 1123
1123 1124 One repo should be new, the other should be shared from the pool. We
1124 1125 don't care which is which, so we just make sure we always print the
1125 1126 one containing "new pooled" first, then one one containing "existing
1126 1127 pooled".
1127 1128
1128 1129 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1129 1130 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1130 1131 requesting all changes
1131 1132 adding changesets
1132 1133 adding manifests
1133 1134 adding file changes
1134 1135 added 3 changesets with 3 changes to 1 files
1135 1136 new changesets b5f04eac9d8f:e5bfe23c0b47
1136 1137 searching for changes
1137 1138 no changes found
1138 1139 adding remote bookmark bookA
1139 1140 updating working directory
1140 1141 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1141 1142
1142 1143 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1143 1144 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1144 1145 searching for changes
1145 1146 no changes found
1146 1147 adding remote bookmark bookA
1147 1148 updating working directory
1148 1149 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1149 1150
1150 1151 SEC: check for unsafe ssh url
1151 1152
1152 1153 $ cat >> $HGRCPATH << EOF
1153 1154 > [ui]
1154 1155 > ssh = sh -c "read l; read l; read l"
1155 1156 > EOF
1156 1157
1157 1158 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1158 1159 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1159 1160 [255]
1160 1161 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1161 1162 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1162 1163 [255]
1163 1164 $ hg clone 'ssh://fakehost|touch%20owned/path'
1164 1165 abort: no suitable response from remote hg!
1165 1166 [255]
1166 1167 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1167 1168 abort: no suitable response from remote hg!
1168 1169 [255]
1169 1170
1170 1171 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1171 1172 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1172 1173 [255]
1173 1174
1174 1175 #if windows
1175 1176 $ hg clone "ssh://%26touch%20owned%20/" --debug
1176 1177 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1177 1178 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1178 1179 sending hello command
1179 1180 sending between command
1180 1181 abort: no suitable response from remote hg!
1181 1182 [255]
1182 1183 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1183 1184 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1184 1185 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1185 1186 sending hello command
1186 1187 sending between command
1187 1188 abort: no suitable response from remote hg!
1188 1189 [255]
1189 1190 #else
1190 1191 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1191 1192 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1192 1193 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1193 1194 sending hello command
1194 1195 sending between command
1195 1196 abort: no suitable response from remote hg!
1196 1197 [255]
1197 1198 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1198 1199 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1199 1200 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1200 1201 sending hello command
1201 1202 sending between command
1202 1203 abort: no suitable response from remote hg!
1203 1204 [255]
1204 1205 #endif
1205 1206
1206 1207 $ hg clone "ssh://v-alid.example.com/" --debug
1207 1208 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1208 1209 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1209 1210 sending hello command
1210 1211 sending between command
1211 1212 abort: no suitable response from remote hg!
1212 1213 [255]
1213 1214
1214 1215 We should not have created a file named owned - if it exists, the
1215 1216 attack succeeded.
1216 1217 $ if test -f owned; then echo 'you got owned'; fi
1217 1218
1218 1219 Cloning without fsmonitor enabled does not print a warning for small repos
1219 1220
1220 1221 $ hg clone a fsmonitor-default
1221 1222 updating to bookmark @ on branch stable
1222 1223 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1223 1224
1224 1225 Lower the warning threshold to simulate a large repo
1225 1226
1226 1227 $ cat >> $HGRCPATH << EOF
1227 1228 > [fsmonitor]
1228 1229 > warn_update_file_count = 2
1229 1230 > EOF
1230 1231
1231 1232 We should see a warning about no fsmonitor on supported platforms
1232 1233
1233 1234 #if linuxormacos no-fsmonitor
1234 1235 $ hg clone a nofsmonitor
1235 1236 updating to bookmark @ on branch stable
1236 1237 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1237 1238 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1238 1239 #else
1239 1240 $ hg clone a nofsmonitor
1240 1241 updating to bookmark @ on branch stable
1241 1242 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1242 1243 #endif
1243 1244
1244 1245 We should not see warning about fsmonitor when it is enabled
1245 1246
1246 1247 #if fsmonitor
1247 1248 $ hg clone a fsmonitor-enabled
1248 1249 updating to bookmark @ on branch stable
1249 1250 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1250 1251 #endif
1251 1252
1252 1253 We can disable the fsmonitor warning
1253 1254
1254 1255 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1255 1256 updating to bookmark @ on branch stable
1256 1257 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1257 1258
1258 1259 Loaded fsmonitor but disabled in config should still print warning
1259 1260
1260 1261 #if linuxormacos fsmonitor
1261 1262 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1262 1263 updating to bookmark @ on branch stable
1263 1264 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1264 1265 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1265 1266 #endif
1266 1267
1267 1268 Warning not printed if working directory isn't empty
1268 1269
1269 1270 $ hg -q clone a fsmonitor-update
1270 1271 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1271 1272 $ cd fsmonitor-update
1272 1273 $ hg up acb14030fe0a
1273 1274 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1274 1275 (leaving bookmark @)
1275 1276 $ hg up cf0fe1914066
1276 1277 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1277 1278
1278 1279 `hg update` from null revision also prints
1279 1280
1280 1281 $ hg up null
1281 1282 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1282 1283
1283 1284 #if linuxormacos no-fsmonitor
1284 1285 $ hg up cf0fe1914066
1285 1286 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1286 1287 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1287 1288 #else
1288 1289 $ hg up cf0fe1914066
1289 1290 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1290 1291 #endif
1291 1292
1292 1293 $ cd ..
1293 1294
@@ -1,432 +1,432 b''
1 1 #require hardlink reporevlogstore
2 2
3 3 $ cat > nlinks.py <<EOF
4 4 > from __future__ import print_function
5 5 > import sys
6 6 > from mercurial import pycompat, util
7 7 > for f in sorted(sys.stdin.readlines()):
8 8 > f = f[:-1]
9 9 > print(util.nlinks(pycompat.fsencode(f)), f)
10 10 > EOF
11 11
12 12 $ nlinksdir()
13 13 > {
14 14 > find "$@" -type f | "$PYTHON" $TESTTMP/nlinks.py
15 15 > }
16 16
17 17 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
18 18
19 19 $ cat > linkcp.py <<EOF
20 20 > from __future__ import absolute_import
21 21 > import sys
22 22 > from mercurial import pycompat, util
23 23 > util.copyfiles(pycompat.fsencode(sys.argv[1]),
24 24 > pycompat.fsencode(sys.argv[2]), hardlink=True)
25 25 > EOF
26 26
27 27 $ linkcp()
28 28 > {
29 29 > "$PYTHON" $TESTTMP/linkcp.py $1 $2
30 30 > }
31 31
32 32 Prepare repo r1:
33 33
34 34 $ hg init r1
35 35 $ cd r1
36 36
37 37 $ echo c1 > f1
38 38 $ hg add f1
39 39 $ hg ci -m0
40 40
41 41 $ mkdir d1
42 42 $ cd d1
43 43 $ echo c2 > f2
44 44 $ hg add f2
45 45 $ hg ci -m1
46 46 $ cd ../..
47 47
48 48 $ nlinksdir r1/.hg/store
49 49 1 r1/.hg/store/00changelog.i
50 50 1 r1/.hg/store/00manifest.i
51 51 1 r1/.hg/store/data/d1/f2.i
52 52 1 r1/.hg/store/data/f1.i
53 53 1 r1/.hg/store/fncache (repofncache !)
54 54 1 r1/.hg/store/phaseroots
55 55 1 r1/.hg/store/undo
56 56 1 r1/.hg/store/undo.backup.fncache (repofncache !)
57 57 1 r1/.hg/store/undo.backupfiles
58 58 1 r1/.hg/store/undo.phaseroots
59 59
60 60
61 61 Create hardlinked clone r2:
62 62
63 63 $ hg clone -U --debug r1 r2 --config progress.debug=true
64 64 linking: 1 files
65 65 linking: 2 files
66 66 linking: 3 files
67 67 linking: 4 files
68 68 linking: 5 files
69 69 linking: 6 files
70 70 linking: 7 files
71 71 linked 7 files
72 72
73 73 Create non-hardlinked clone r3:
74 74
75 75 $ hg clone --pull r1 r3
76 76 requesting all changes
77 77 adding changesets
78 78 adding manifests
79 79 adding file changes
80 80 added 2 changesets with 2 changes to 2 files
81 81 new changesets 40d85e9847f2:7069c422939c
82 82 updating to branch default
83 83 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
84 84
85 85
86 86 Repos r1 and r2 should now contain hardlinked files:
87 87
88 88 $ nlinksdir r1/.hg/store
89 89 2 r1/.hg/store/00changelog.i
90 90 2 r1/.hg/store/00manifest.i
91 91 2 r1/.hg/store/data/d1/f2.i
92 92 2 r1/.hg/store/data/f1.i
93 93 2 r1/.hg/store/fncache (repofncache !)
94 94 1 r1/.hg/store/phaseroots
95 95 1 r1/.hg/store/undo
96 96 1 r1/.hg/store/undo.backup.fncache (repofncache !)
97 97 1 r1/.hg/store/undo.backupfiles
98 98 1 r1/.hg/store/undo.phaseroots
99 99
100 100 $ nlinksdir r2/.hg/store
101 101 2 r2/.hg/store/00changelog.i
102 102 2 r2/.hg/store/00manifest.i
103 103 2 r2/.hg/store/data/d1/f2.i
104 104 2 r2/.hg/store/data/f1.i
105 105 2 r2/.hg/store/fncache (repofncache !)
106 106
107 107 Repo r3 should not be hardlinked:
108 108
109 109 $ nlinksdir r3/.hg/store
110 110 1 r3/.hg/store/00changelog.i
111 111 1 r3/.hg/store/00manifest.i
112 112 1 r3/.hg/store/data/d1/f2.i
113 113 1 r3/.hg/store/data/f1.i
114 114 1 r3/.hg/store/fncache (repofncache !)
115 115 1 r3/.hg/store/phaseroots
116 116 1 r3/.hg/store/undo
117 117 1 r3/.hg/store/undo.backupfiles
118 118 1 r3/.hg/store/undo.phaseroots
119 119
120 120
121 121 Create a non-inlined filelog in r3:
122 122
123 123 $ cd r3/d1
124 124 >>> f = open('data1', 'wb')
125 125 >>> for x in range(10000):
126 126 ... f.write(b"%d\n" % x) and None
127 127 >>> f.close()
128 128 $ for j in 0 1 2 3 4 5 6 7 8 9; do
129 129 > cat data1 >> f2
130 130 > hg commit -m$j
131 131 > done
132 132 $ cd ../..
133 133
134 134 $ nlinksdir r3/.hg/store
135 135 1 r3/.hg/store/00changelog.i
136 136 1 r3/.hg/store/00manifest.i
137 137 1 r3/.hg/store/data/d1/f2.d
138 138 1 r3/.hg/store/data/d1/f2.i
139 139 1 r3/.hg/store/data/f1.i
140 140 1 r3/.hg/store/fncache (repofncache !)
141 141 1 r3/.hg/store/phaseroots
142 142 1 r3/.hg/store/undo
143 143 1 r3/.hg/store/undo.backup.fncache (repofncache !)
144 144 1 r3/.hg/store/undo.backup.phaseroots
145 145 1 r3/.hg/store/undo.backupfiles
146 146 1 r3/.hg/store/undo.phaseroots
147 147
148 148 Push to repo r1 should break up most hardlinks in r2:
149 149
150 150 $ hg -R r2 verify
151 151 checking changesets
152 152 checking manifests
153 153 crosschecking files in changesets and manifests
154 154 checking files
155 155 checked 2 changesets with 2 changes to 2 files
156 156
157 157 $ cd r3
158 158 $ hg push
159 159 pushing to $TESTTMP/r1
160 160 searching for changes
161 161 adding changesets
162 162 adding manifests
163 163 adding file changes
164 164 added 10 changesets with 10 changes to 1 files
165 165
166 166 $ cd ..
167 167
168 168 $ nlinksdir r2/.hg/store
169 169 1 r2/.hg/store/00changelog.i
170 170 1 r2/.hg/store/00manifest.i
171 171 1 r2/.hg/store/data/d1/f2.i
172 172 2 r2/.hg/store/data/f1.i
173 173 [12] r2/\.hg/store/fncache (re) (repofncache !)
174 174
175 175 #if hardlink-whitelisted repofncache
176 176 $ nlinksdir r2/.hg/store/fncache
177 177 2 r2/.hg/store/fncache
178 178 #endif
179 179
180 180 $ hg -R r2 verify
181 181 checking changesets
182 182 checking manifests
183 183 crosschecking files in changesets and manifests
184 184 checking files
185 185 checked 2 changesets with 2 changes to 2 files
186 186
187 187
188 188 $ cd r1
189 189 $ hg up
190 190 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
191 191
192 192 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
193 193
194 194 $ echo c1c1 >> f1
195 195 $ hg ci -m00
196 196 $ cd ..
197 197
198 198 $ nlinksdir r2/.hg/store
199 199 1 r2/.hg/store/00changelog.i
200 200 1 r2/.hg/store/00manifest.i
201 201 1 r2/.hg/store/data/d1/f2.i
202 202 1 r2/.hg/store/data/f1.i
203 203 [12] r2/\.hg/store/fncache (re) (repofncache !)
204 204
205 205 #if hardlink-whitelisted repofncache
206 206 $ nlinksdir r2/.hg/store/fncache
207 207 2 r2/.hg/store/fncache
208 208 #endif
209 209
210 210 Create a file which exec permissions we will change
211 211 $ cd r3
212 212 $ echo "echo hello world" > f3
213 213 $ hg add f3
214 214 $ hg ci -mf3
215 215 $ cd ..
216 216
217 217 $ cd r3
218 218 $ hg tip --template '{rev}:{node|short}\n'
219 219 12:d3b77733a28a
220 220 $ echo bla > f1
221 221 $ chmod +x f3
222 222 $ hg ci -m1
223 223 $ cd ..
224 224
225 225 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
226 226
227 227 $ linkcp r3 r4
228 228
229 229 'checklink' is produced by hardlinking a symlink, which is undefined whether
230 230 the symlink should be followed or not. It does behave differently on Linux and
231 231 BSD. Just remove it so the test pass on both platforms.
232 232
233 233 $ rm -f r4/.hg/wcache/checklink
234 234
235 235 r4 has hardlinks in the working dir (not just inside .hg):
236 236
237 237 $ nlinksdir r4
238 238 2 r4/.hg/00changelog.i
239 239 2 r4/.hg/branch
240 240 2 r4/.hg/cache/branch2-base
241 241 2 r4/.hg/cache/branch2-served
242 242 2 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
243 243 2 r4/.hg/cache/rbc-names-v1
244 244 2 r4/.hg/cache/rbc-revs-v1
245 245 2 r4/.hg/dirstate
246 246 2 r4/.hg/fsmonitor.state (fsmonitor !)
247 247 2 r4/.hg/hgrc
248 248 2 r4/.hg/last-message.txt
249 249 2 r4/.hg/requires
250 250 2 r4/.hg/store/00changelog.i
251 251 2 r4/.hg/store/00manifest.i
252 252 2 r4/.hg/store/data/d1/f2.d
253 253 2 r4/.hg/store/data/d1/f2.i
254 254 2 r4/.hg/store/data/f1.i
255 255 2 r4/.hg/store/data/f3.i
256 256 2 r4/.hg/store/fncache (repofncache !)
257 257 2 r4/.hg/store/phaseroots
258 258 2 r4/.hg/store/undo
259 259 2 r4/.hg/store/undo.backup.fncache (repofncache !)
260 260 2 r4/.hg/store/undo.backup.phaseroots
261 261 2 r4/.hg/store/undo.backupfiles
262 262 2 r4/.hg/store/undo.phaseroots
263 263 [24] r4/\.hg/undo\.backup\.dirstate (re)
264 264 2 r4/.hg/undo.bookmarks
265 265 2 r4/.hg/undo.branch
266 266 2 r4/.hg/undo.desc
267 267 [24] r4/\.hg/undo\.dirstate (re)
268 268 2 r4/.hg/wcache/checkisexec (execbit !)
269 269 2 r4/.hg/wcache/checklink-target (symlink !)
270 270 2 r4/.hg/wcache/checknoexec (execbit !)
271 271 2 r4/d1/data1
272 272 2 r4/d1/f2
273 273 2 r4/f1
274 274 2 r4/f3
275 275
276 276 Update back to revision 12 in r4 should break hardlink of file f1 and f3:
277 277 #if hardlink-whitelisted
278 278 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
279 279 4 r4/.hg/undo.backup.dirstate
280 280 4 r4/.hg/undo.dirstate
281 281 #endif
282 282
283 283
284 284 $ hg -R r4 up 12
285 285 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (execbit !)
286 286 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-execbit !)
287 287
288 288 $ nlinksdir r4
289 289 2 r4/.hg/00changelog.i
290 290 1 r4/.hg/branch
291 291 2 r4/.hg/cache/branch2-base
292 292 2 r4/.hg/cache/branch2-served
293 2 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
293 1 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
294 294 2 r4/.hg/cache/rbc-names-v1
295 295 2 r4/.hg/cache/rbc-revs-v1
296 296 1 r4/.hg/dirstate
297 297 1 r4/.hg/fsmonitor.state (fsmonitor !)
298 298 2 r4/.hg/hgrc
299 299 2 r4/.hg/last-message.txt
300 300 2 r4/.hg/requires
301 301 2 r4/.hg/store/00changelog.i
302 302 2 r4/.hg/store/00manifest.i
303 303 2 r4/.hg/store/data/d1/f2.d
304 304 2 r4/.hg/store/data/d1/f2.i
305 305 2 r4/.hg/store/data/f1.i
306 306 2 r4/.hg/store/data/f3.i
307 307 2 r4/.hg/store/fncache
308 308 2 r4/.hg/store/phaseroots
309 309 2 r4/.hg/store/undo
310 310 2 r4/.hg/store/undo.backup.fncache (repofncache !)
311 311 2 r4/.hg/store/undo.backup.phaseroots
312 312 2 r4/.hg/store/undo.backupfiles
313 313 2 r4/.hg/store/undo.phaseroots
314 314 [24] r4/\.hg/undo\.backup\.dirstate (re)
315 315 2 r4/.hg/undo.bookmarks
316 316 2 r4/.hg/undo.branch
317 317 2 r4/.hg/undo.desc
318 318 [24] r4/\.hg/undo\.dirstate (re)
319 319 2 r4/.hg/wcache/checkisexec (execbit !)
320 320 2 r4/.hg/wcache/checklink-target (symlink !)
321 321 2 r4/.hg/wcache/checknoexec (execbit !)
322 322 2 r4/d1/data1
323 323 2 r4/d1/f2
324 324 1 r4/f1
325 325 1 r4/f3 (execbit !)
326 326 2 r4/f3 (no-execbit !)
327 327
328 328 #if hardlink-whitelisted
329 329 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
330 330 4 r4/.hg/undo.backup.dirstate
331 331 4 r4/.hg/undo.dirstate
332 332 #endif
333 333
334 334 Test hardlinking outside hg:
335 335
336 336 $ mkdir x
337 337 $ echo foo > x/a
338 338
339 339 $ linkcp x y
340 340 $ echo bar >> y/a
341 341
342 342 No diff if hardlink:
343 343
344 344 $ diff x/a y/a
345 345
346 346 Test mq hardlinking:
347 347
348 348 $ echo "[extensions]" >> $HGRCPATH
349 349 $ echo "mq=" >> $HGRCPATH
350 350
351 351 $ hg init a
352 352 $ cd a
353 353
354 354 $ hg qimport -n foo - << EOF
355 355 > # HG changeset patch
356 356 > # Date 1 0
357 357 > diff -r 2588a8b53d66 a
358 358 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
359 359 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
360 360 > @@ -0,0 +1,1 @@
361 361 > +a
362 362 > EOF
363 363 adding foo to series file
364 364
365 365 $ hg qpush
366 366 applying foo
367 367 now at: foo
368 368
369 369 $ cd ..
370 370 $ linkcp a b
371 371 $ cd b
372 372
373 373 $ hg qimport -n bar - << EOF
374 374 > # HG changeset patch
375 375 > # Date 2 0
376 376 > diff -r 2588a8b53d66 a
377 377 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
378 378 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
379 379 > @@ -0,0 +1,1 @@
380 380 > +b
381 381 > EOF
382 382 adding bar to series file
383 383
384 384 $ hg qpush
385 385 applying bar
386 386 now at: bar
387 387
388 388 $ cat .hg/patches/status
389 389 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
390 390 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
391 391
392 392 $ cat .hg/patches/series
393 393 foo
394 394 bar
395 395
396 396 $ cat ../a/.hg/patches/status
397 397 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
398 398
399 399 $ cat ../a/.hg/patches/series
400 400 foo
401 401
402 402 Test tags hardlinking:
403 403
404 404 $ hg qdel -r qbase:qtip
405 405 patch foo finalized without changeset message
406 406 patch bar finalized without changeset message
407 407
408 408 $ hg tag -l lfoo
409 409 $ hg tag foo
410 410
411 411 $ cd ..
412 412 $ linkcp b c
413 413 $ cd c
414 414
415 415 $ hg tag -l -r 0 lbar
416 416 $ hg tag -r 0 bar
417 417
418 418 $ cat .hgtags
419 419 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
420 420 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
421 421
422 422 $ cat .hg/localtags
423 423 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
424 424 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
425 425
426 426 $ cat ../b/.hgtags
427 427 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
428 428
429 429 $ cat ../b/.hg/localtags
430 430 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
431 431
432 432 $ cd ..
@@ -1,185 +1,203 b''
1 1 Source bundle was generated with the following script:
2 2
3 3 # hg init
4 4 # echo a > a
5 5 # ln -s a l
6 6 # hg ci -Ama -d'0 0'
7 7 # mkdir b
8 8 # echo a > b/a
9 9 # chmod +x b/a
10 10 # hg ci -Amb -d'1 0'
11 11
12 12 $ hg init
13 13 $ hg unbundle "$TESTDIR/bundles/test-manifest.hg"
14 14 adding changesets
15 15 adding manifests
16 16 adding file changes
17 17 added 2 changesets with 3 changes to 3 files
18 18 new changesets b73562a03cfe:5bdc995175ba (2 drafts)
19 19 (run 'hg update' to get a working copy)
20 20
21 21 The next call is expected to return nothing:
22 22
23 23 $ hg manifest
24 24
25 25 $ hg co
26 26 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
27 27
28 28 $ hg manifest
29 29 a
30 30 b/a
31 31 l
32 32
33 33 $ hg files -vr .
34 34 2 a
35 35 2 x b/a
36 36 1 l l
37 37 $ hg files -r . -X b
38 38 a
39 39 l
40 40 $ hg files -T '{path} {size} {flags}\n'
41 41 a 2
42 42 b/a 2 x
43 43 l 1 l
44 44 $ hg files -T '{path} {node|shortest}\n' -r.
45 45 a 5bdc
46 46 b/a 5bdc
47 47 l 5bdc
48 48
49 49 $ hg manifest -v
50 50 644 a
51 51 755 * b/a
52 52 644 @ l
53 53 $ hg manifest -T '{path} {rev}\n'
54 54 a 1
55 55 b/a 1
56 56 l 1
57 57
58 58 $ hg manifest --debug
59 59 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 644 a
60 60 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 755 * b/a
61 61 047b75c6d7a3ef6a2243bd0e99f94f6ea6683597 644 @ l
62 62
63 63 $ hg manifest -r 0
64 64 a
65 65 l
66 66
67 67 $ hg manifest -r 1
68 68 a
69 69 b/a
70 70 l
71 71
72 72 $ hg manifest -r tip
73 73 a
74 74 b/a
75 75 l
76 76
77 77 $ hg manifest tip
78 78 a
79 79 b/a
80 80 l
81 81
82 82 $ hg manifest --all
83 83 a
84 84 b/a
85 85 l
86 86
87 87 The next two calls are expected to abort:
88 88
89 89 $ hg manifest -r 2
90 90 abort: unknown revision '2'!
91 91 [255]
92 92
93 93 $ hg manifest -r tip tip
94 94 abort: please specify just one revision
95 95 [255]
96 96
97 97 Testing the manifest full text cache utility
98 98 --------------------------------------------
99 99
100 100 Reminder of the manifest log content
101 101
102 102 $ hg log --debug | grep 'manifest:'
103 103 manifest: 1:1e01206b1d2f72bd55f2a33fa8ccad74144825b7
104 104 manifest: 0:fce2a30dedad1eef4da95ca1dc0004157aa527cf
105 105
106 106 Showing the content of the caches after the above operations
107 107
108 108 $ hg debugmanifestfulltextcache
109 cache empty
109 cache contains 1 manifest entries, in order of most to least recent:
110 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
111 total cache data size 157 bytes, on-disk 157 bytes
110 112
111 113 (Clearing the cache in case of any content)
112 114
113 115 $ hg debugmanifestfulltextcache --clear
114 116
115 117 Adding a new persistent entry in the cache
116 118
117 119 $ hg debugmanifestfulltextcache --add 1e01206b1d2f72bd55f2a33fa8ccad74144825b7
118 120
119 121 $ hg debugmanifestfulltextcache
120 122 cache contains 1 manifest entries, in order of most to least recent:
121 123 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
122 124 total cache data size 157 bytes, on-disk 157 bytes
123 125
124 126 Check we don't duplicate entries (added from the debug command)
125 127
126 128 $ hg debugmanifestfulltextcache --add 1e01206b1d2f72bd55f2a33fa8ccad74144825b7
127 129 $ hg debugmanifestfulltextcache
128 130 cache contains 1 manifest entries, in order of most to least recent:
129 131 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
130 132 total cache data size 157 bytes, on-disk 157 bytes
131 133
132 134 Adding a second entry
133 135
134 136 $ hg debugmanifestfulltextcache --add fce2a30dedad1eef4da95ca1dc0004157aa527cf
135 137 $ hg debugmanifestfulltextcache
136 138 cache contains 2 manifest entries, in order of most to least recent:
137 139 id: fce2a30dedad1eef4da95ca1dc0004157aa527cf, size 87 bytes
138 140 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
139 141 total cache data size 268 bytes, on-disk 268 bytes
140 142
141 143 Accessing the initial entry again refreshes their order
142 144
143 145 $ hg debugmanifestfulltextcache --add 1e01206b1d2f72bd55f2a33fa8ccad74144825b7
144 146 $ hg debugmanifestfulltextcache
145 147 cache contains 2 manifest entries, in order of most to least recent:
146 148 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
147 149 id: fce2a30dedad1eef4da95ca1dc0004157aa527cf, size 87 bytes
148 150 total cache data size 268 bytes, on-disk 268 bytes
149 151
150 152 Check cache clearing
151 153
152 154 $ hg debugmanifestfulltextcache --clear
153 155 $ hg debugmanifestfulltextcache
154 156 cache empty
155 157
156 158 Check adding multiple entries in one go:
157 159
158 160 $ hg debugmanifestfulltextcache --add fce2a30dedad1eef4da95ca1dc0004157aa527cf --add 1e01206b1d2f72bd55f2a33fa8ccad74144825b7
159 161 $ hg debugmanifestfulltextcache
160 162 cache contains 2 manifest entries, in order of most to least recent:
161 163 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
162 164 id: fce2a30dedad1eef4da95ca1dc0004157aa527cf, size 87 bytes
163 165 total cache data size 268 bytes, on-disk 268 bytes
164 166 $ hg debugmanifestfulltextcache --clear
165 167
166 168 Test caching behavior on actual operation
167 169 -----------------------------------------
168 170
169 171 Make sure we start empty
170 172
171 173 $ hg debugmanifestfulltextcache
172 174 cache empty
173 175
174 176 Commit should have the new node cached:
175 177
176 178 $ echo a >> b/a
177 179 $ hg commit -m 'foo'
178 180 $ hg debugmanifestfulltextcache
179 181 cache contains 2 manifest entries, in order of most to least recent:
180 182 id: 26b8653b67af8c1a0a0317c4ee8dac50a41fdb65, size 133 bytes
181 183 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
182 184 total cache data size 314 bytes, on-disk 314 bytes
183 185 $ hg log -r 'ancestors(., 1)' --debug | grep 'manifest:'
184 186 manifest: 1:1e01206b1d2f72bd55f2a33fa8ccad74144825b7
185 187 manifest: 2:26b8653b67af8c1a0a0317c4ee8dac50a41fdb65
188
189 hg update should warm the cache too
190
191 (force dirstate check to avoid flakiness in manifest order)
192 $ hg debugrebuilddirstate
193
194 $ hg update 0
195 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
196 $ hg debugmanifestfulltextcache
197 cache contains 3 manifest entries, in order of most to least recent:
198 id: fce2a30dedad1eef4da95ca1dc0004157aa527cf, size 87 bytes
199 id: 26b8653b67af8c1a0a0317c4ee8dac50a41fdb65, size 133 bytes
200 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
201 total cache data size 425 bytes, on-disk 425 bytes
202 $ hg log -r '0' --debug | grep 'manifest:'
203 manifest: 0:fce2a30dedad1eef4da95ca1dc0004157aa527cf
General Comments 0
You need to be logged in to leave comments. Login now