exchange: use command executor for pushkey...
Gregory Szorc
r37665:516b5a5e default
@@ -1,3110 +1,3117 @@
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import codecs
11 11 import collections
12 12 import difflib
13 13 import errno
14 14 import operator
15 15 import os
16 16 import random
17 17 import re
18 18 import socket
19 19 import ssl
20 20 import stat
21 21 import string
22 22 import subprocess
23 23 import sys
24 24 import tempfile
25 25 import time
26 26
27 27 from .i18n import _
28 28 from .node import (
29 29 bin,
30 30 hex,
31 31 nullhex,
32 32 nullid,
33 33 nullrev,
34 34 short,
35 35 )
36 36 from .thirdparty import (
37 37 cbor,
38 38 )
39 39 from . import (
40 40 bundle2,
41 41 changegroup,
42 42 cmdutil,
43 43 color,
44 44 context,
45 45 dagparser,
46 46 dagutil,
47 47 encoding,
48 48 error,
49 49 exchange,
50 50 extensions,
51 51 filemerge,
52 52 fileset,
53 53 formatter,
54 54 hg,
55 55 httppeer,
56 56 localrepo,
57 57 lock as lockmod,
58 58 logcmdutil,
59 59 merge as mergemod,
60 60 obsolete,
61 61 obsutil,
62 62 phases,
63 63 policy,
64 64 pvec,
65 65 pycompat,
66 66 registrar,
67 67 repair,
68 68 revlog,
69 69 revset,
70 70 revsetlang,
71 71 scmutil,
72 72 setdiscovery,
73 73 simplemerge,
74 74 smartset,
75 75 sshpeer,
76 76 sslutil,
77 77 streamclone,
78 78 templater,
79 79 treediscovery,
80 80 upgrade,
81 81 url as urlmod,
82 82 util,
83 83 vfs as vfsmod,
84 84 wireprotoframing,
85 85 wireprotoserver,
86 86 )
87 87 from .utils import (
88 88 dateutil,
89 89 procutil,
90 90 stringutil,
91 91 )
92 92
93 93 release = lockmod.release
94 94
95 95 command = registrar.command()
96 96
97 97 @command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
98 98 def debugancestor(ui, repo, *args):
99 99 """find the ancestor revision of two revisions in a given index"""
100 100 if len(args) == 3:
101 101 index, rev1, rev2 = args
102 102 r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
103 103 lookup = r.lookup
104 104 elif len(args) == 2:
105 105 if not repo:
106 106 raise error.Abort(_('there is no Mercurial repository here '
107 107 '(.hg not found)'))
108 108 rev1, rev2 = args
109 109 r = repo.changelog
110 110 lookup = repo.lookup
111 111 else:
112 112 raise error.Abort(_('either two or three arguments required'))
113 113 a = r.ancestor(lookup(rev1), lookup(rev2))
114 114 ui.write('%d:%s\n' % (r.rev(a), hex(a)))
115 115
116 116 @command('debugapplystreamclonebundle', [], 'FILE')
117 117 def debugapplystreamclonebundle(ui, repo, fname):
118 118 """apply a stream clone bundle file"""
119 119 f = hg.openpath(ui, fname)
120 120 gen = exchange.readbundle(ui, f, fname)
121 121 gen.apply(repo)
122 122
123 123 @command('debugbuilddag',
124 124 [('m', 'mergeable-file', None, _('add single file mergeable changes')),
125 125 ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
126 126 ('n', 'new-file', None, _('add new file at each rev'))],
127 127 _('[OPTION]... [TEXT]'))
128 128 def debugbuilddag(ui, repo, text=None,
129 129 mergeable_file=False,
130 130 overwritten_file=False,
131 131 new_file=False):
132 132 """builds a repo with a given DAG from scratch in the current empty repo
133 133
134 134 The description of the DAG is read from stdin if not given on the
135 135 command line.
136 136
137 137 Elements:
138 138
139 139 - "+n" is a linear run of n nodes based on the current default parent
140 140 - "." is a single node based on the current default parent
141 141 - "$" resets the default parent to null (implied at the start);
142 142 otherwise the default parent is always the last node created
143 143 - "<p" sets the default parent to the backref p
144 144 - "*p" is a fork at parent p, which is a backref
145 145 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
146 146 - "/p2" is a merge of the preceding node and p2
147 147 - ":tag" defines a local tag for the preceding node
148 148 - "@branch" sets the named branch for subsequent nodes
149 149 - "#...\\n" is a comment up to the end of the line
150 150
151 151 Whitespace between the above elements is ignored.
152 152
153 153 A backref is either
154 154
155 155 - a number n, which references the node curr-n, where curr is the current
156 156 node, or
157 157 - the name of a local tag you placed earlier using ":tag", or
158 158 - empty to denote the default parent.
159 159
160 160 All string-valued elements are either strictly alphanumeric, or must
161 161 be enclosed in double quotes ("..."), with "\\" as escape character.
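
For example (an illustrative description built from the elements above),
the text "+3 :top $ +2 /top" builds a linear run of three nodes, tags the
third as "top", starts a new two-node root, and finally merges the last
of those with the tagged node.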
162 162 """
163 163
164 164 if text is None:
165 165 ui.status(_("reading DAG from stdin\n"))
166 166 text = ui.fin.read()
167 167
168 168 cl = repo.changelog
169 169 if len(cl) > 0:
170 170 raise error.Abort(_('repository is not empty'))
171 171
172 172 # determine number of revs in DAG
173 173 total = 0
174 174 for type, data in dagparser.parsedag(text):
175 175 if type == 'n':
176 176 total += 1
177 177
178 178 if mergeable_file:
179 179 linesperrev = 2
180 180 # make a file with k lines per rev
181 181 initialmergedlines = ['%d' % i for i in xrange(0, total * linesperrev)]
182 182 initialmergedlines.append("")
183 183
184 184 tags = []
185 185
186 186 wlock = lock = tr = None
187 187 try:
188 188 wlock = repo.wlock()
189 189 lock = repo.lock()
190 190 tr = repo.transaction("builddag")
191 191
192 192 at = -1
193 193 atbranch = 'default'
194 194 nodeids = []
195 195 id = 0
196 196 ui.progress(_('building'), id, unit=_('revisions'), total=total)
197 197 for type, data in dagparser.parsedag(text):
198 198 if type == 'n':
199 199 ui.note(('node %s\n' % pycompat.bytestr(data)))
200 200 id, ps = data
201 201
202 202 files = []
203 203 filecontent = {}
204 204
205 205 p2 = None
206 206 if mergeable_file:
207 207 fn = "mf"
208 208 p1 = repo[ps[0]]
209 209 if len(ps) > 1:
210 210 p2 = repo[ps[1]]
211 211 pa = p1.ancestor(p2)
212 212 base, local, other = [x[fn].data() for x in (pa, p1,
213 213 p2)]
214 214 m3 = simplemerge.Merge3Text(base, local, other)
215 215 ml = [l.strip() for l in m3.merge_lines()]
216 216 ml.append("")
217 217 elif at > 0:
218 218 ml = p1[fn].data().split("\n")
219 219 else:
220 220 ml = initialmergedlines
221 221 ml[id * linesperrev] += " r%i" % id
222 222 mergedtext = "\n".join(ml)
223 223 files.append(fn)
224 224 filecontent[fn] = mergedtext
225 225
226 226 if overwritten_file:
227 227 fn = "of"
228 228 files.append(fn)
229 229 filecontent[fn] = "r%i\n" % id
230 230
231 231 if new_file:
232 232 fn = "nf%i" % id
233 233 files.append(fn)
234 234 filecontent[fn] = "r%i\n" % id
235 235 if len(ps) > 1:
236 236 if not p2:
237 237 p2 = repo[ps[1]]
238 238 for fn in p2:
239 239 if fn.startswith("nf"):
240 240 files.append(fn)
241 241 filecontent[fn] = p2[fn].data()
242 242
243 243 def fctxfn(repo, cx, path):
244 244 if path in filecontent:
245 245 return context.memfilectx(repo, cx, path,
246 246 filecontent[path])
247 247 return None
248 248
249 249 if len(ps) == 0 or ps[0] < 0:
250 250 pars = [None, None]
251 251 elif len(ps) == 1:
252 252 pars = [nodeids[ps[0]], None]
253 253 else:
254 254 pars = [nodeids[p] for p in ps]
255 255 cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
256 256 date=(id, 0),
257 257 user="debugbuilddag",
258 258 extra={'branch': atbranch})
259 259 nodeid = repo.commitctx(cx)
260 260 nodeids.append(nodeid)
261 261 at = id
262 262 elif type == 'l':
263 263 id, name = data
264 264 ui.note(('tag %s\n' % name))
265 265 tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
266 266 elif type == 'a':
267 267 ui.note(('branch %s\n' % data))
268 268 atbranch = data
269 269 ui.progress(_('building'), id, unit=_('revisions'), total=total)
270 270 tr.close()
271 271
272 272 if tags:
273 273 repo.vfs.write("localtags", "".join(tags))
274 274 finally:
275 275 ui.progress(_('building'), None)
276 276 release(tr, lock, wlock)
277 277
278 278 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
279 279 indent_string = ' ' * indent
280 280 if all:
281 281 ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
282 282 % indent_string)
283 283
284 284 def showchunks(named):
285 285 ui.write("\n%s%s\n" % (indent_string, named))
286 286 for deltadata in gen.deltaiter():
287 287 node, p1, p2, cs, deltabase, delta, flags = deltadata
288 288 ui.write("%s%s %s %s %s %s %d\n" %
289 289 (indent_string, hex(node), hex(p1), hex(p2),
290 290 hex(cs), hex(deltabase), len(delta)))
291 291
292 292 chunkdata = gen.changelogheader()
293 293 showchunks("changelog")
294 294 chunkdata = gen.manifestheader()
295 295 showchunks("manifest")
296 296 for chunkdata in iter(gen.filelogheader, {}):
297 297 fname = chunkdata['filename']
298 298 showchunks(fname)
299 299 else:
300 300 if isinstance(gen, bundle2.unbundle20):
301 301 raise error.Abort(_('use debugbundle2 for this file'))
302 302 chunkdata = gen.changelogheader()
303 303 for deltadata in gen.deltaiter():
304 304 node, p1, p2, cs, deltabase, delta, flags = deltadata
305 305 ui.write("%s%s\n" % (indent_string, hex(node)))
306 306
307 307 def _debugobsmarkers(ui, part, indent=0, **opts):
308 308 """display version and markers contained in 'data'"""
309 309 opts = pycompat.byteskwargs(opts)
310 310 data = part.read()
311 311 indent_string = ' ' * indent
312 312 try:
313 313 version, markers = obsolete._readmarkers(data)
314 314 except error.UnknownVersion as exc:
315 315 msg = "%sunsupported version: %s (%d bytes)\n"
316 316 msg %= indent_string, exc.version, len(data)
317 317 ui.write(msg)
318 318 else:
319 319 msg = "%sversion: %d (%d bytes)\n"
320 320 msg %= indent_string, version, len(data)
321 321 ui.write(msg)
322 322 fm = ui.formatter('debugobsolete', opts)
323 323 for rawmarker in sorted(markers):
324 324 m = obsutil.marker(None, rawmarker)
325 325 fm.startitem()
326 326 fm.plain(indent_string)
327 327 cmdutil.showmarker(fm, m)
328 328 fm.end()
329 329
330 330 def _debugphaseheads(ui, data, indent=0):
331 331 """display version and markers contained in 'data'"""
332 332 indent_string = ' ' * indent
333 333 headsbyphase = phases.binarydecode(data)
334 334 for phase in phases.allphases:
335 335 for head in headsbyphase[phase]:
336 336 ui.write(indent_string)
337 337 ui.write('%s %s\n' % (hex(head), phases.phasenames[phase]))
338 338
339 339 def _quasirepr(thing):
340 340 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
341 341 return '{%s}' % (
342 342 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing)))
343 343 return pycompat.bytestr(repr(thing))
344 344
345 345 def _debugbundle2(ui, gen, all=None, **opts):
346 346 """lists the contents of a bundle2"""
347 347 if not isinstance(gen, bundle2.unbundle20):
348 348 raise error.Abort(_('not a bundle2 file'))
349 349 ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
350 350 parttypes = opts.get(r'part_type', [])
351 351 for part in gen.iterparts():
352 352 if parttypes and part.type not in parttypes:
353 353 continue
354 354 ui.write('%s -- %s\n' % (part.type, _quasirepr(part.params)))
355 355 if part.type == 'changegroup':
356 356 version = part.params.get('version', '01')
357 357 cg = changegroup.getunbundler(version, part, 'UN')
358 358 if not ui.quiet:
359 359 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
360 360 if part.type == 'obsmarkers':
361 361 if not ui.quiet:
362 362 _debugobsmarkers(ui, part, indent=4, **opts)
363 363 if part.type == 'phase-heads':
364 364 if not ui.quiet:
365 365 _debugphaseheads(ui, part, indent=4)
366 366
367 367 @command('debugbundle',
368 368 [('a', 'all', None, _('show all details')),
369 369 ('', 'part-type', [], _('show only the named part type')),
370 370 ('', 'spec', None, _('print the bundlespec of the bundle'))],
371 371 _('FILE'),
372 372 norepo=True)
373 373 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
374 374 """lists the contents of a bundle"""
375 375 with hg.openpath(ui, bundlepath) as f:
376 376 if spec:
377 377 spec = exchange.getbundlespec(ui, f)
378 378 ui.write('%s\n' % spec)
379 379 return
380 380
381 381 gen = exchange.readbundle(ui, f, bundlepath)
382 382 if isinstance(gen, bundle2.unbundle20):
383 383 return _debugbundle2(ui, gen, all=all, **opts)
384 384 _debugchangegroup(ui, gen, all=all, **opts)
385 385
386 386 @command('debugcapabilities',
387 387 [], _('PATH'),
388 388 norepo=True)
389 389 def debugcapabilities(ui, path, **opts):
390 390 """lists the capabilities of a remote peer"""
391 391 opts = pycompat.byteskwargs(opts)
392 392 peer = hg.peer(ui, opts, path)
393 393 caps = peer.capabilities()
394 394 ui.write(('Main capabilities:\n'))
395 395 for c in sorted(caps):
396 396 ui.write((' %s\n') % c)
397 397 b2caps = bundle2.bundle2caps(peer)
398 398 if b2caps:
399 399 ui.write(('Bundle2 capabilities:\n'))
400 400 for key, values in sorted(b2caps.iteritems()):
401 401 ui.write((' %s\n') % key)
402 402 for v in values:
403 403 ui.write((' %s\n') % v)
404 404
405 405 @command('debugcheckstate', [], '')
406 406 def debugcheckstate(ui, repo):
407 407 """validate the correctness of the current dirstate"""
408 408 parent1, parent2 = repo.dirstate.parents()
409 409 m1 = repo[parent1].manifest()
410 410 m2 = repo[parent2].manifest()
411 411 errors = 0
412 412 for f in repo.dirstate:
413 413 state = repo.dirstate[f]
414 414 if state in "nr" and f not in m1:
415 415 ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
416 416 errors += 1
417 417 if state in "a" and f in m1:
418 418 ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
419 419 errors += 1
420 420 if state in "m" and f not in m1 and f not in m2:
421 421 ui.warn(_("%s in state %s, but not in either manifest\n") %
422 422 (f, state))
423 423 errors += 1
424 424 for f in m1:
425 425 state = repo.dirstate[f]
426 426 if state not in "nrm":
427 427 ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
428 428 errors += 1
429 429 if errors:
430 430 errstr = _(".hg/dirstate inconsistent with current parent's manifest")
431 431 raise error.Abort(errstr)
432 432
433 433 @command('debugcolor',
434 434 [('', 'style', None, _('show all configured styles'))],
435 435 'hg debugcolor')
436 436 def debugcolor(ui, repo, **opts):
437 437 """show available color, effects or style"""
438 438 ui.write(('color mode: %s\n') % ui._colormode)
439 439 if opts.get(r'style'):
440 440 return _debugdisplaystyle(ui)
441 441 else:
442 442 return _debugdisplaycolor(ui)
443 443
444 444 def _debugdisplaycolor(ui):
445 445 ui = ui.copy()
446 446 ui._styles.clear()
447 447 for effect in color._activeeffects(ui).keys():
448 448 ui._styles[effect] = effect
449 449 if ui._terminfoparams:
450 450 for k, v in ui.configitems('color'):
451 451 if k.startswith('color.'):
452 452 ui._styles[k] = k[6:]
453 453 elif k.startswith('terminfo.'):
454 454 ui._styles[k] = k[9:]
455 455 ui.write(_('available colors:\n'))
456 456 # sort labels with a '_' after the others to group '_background' entries.
457 457 items = sorted(ui._styles.items(),
458 458 key=lambda i: ('_' in i[0], i[0], i[1]))
459 459 for colorname, label in items:
460 460 ui.write(('%s\n') % colorname, label=label)
461 461
462 462 def _debugdisplaystyle(ui):
463 463 ui.write(_('available styles:\n'))
464 464 width = max(len(s) for s in ui._styles)
465 465 for label, effects in sorted(ui._styles.items()):
466 466 ui.write('%s' % label, label=label)
467 467 if effects:
469 469 ui.write(': ')
470 470 ui.write(' ' * (max(0, width - len(label))))
471 471 ui.write(', '.join(ui.label(e, e) for e in effects.split()))
472 472 ui.write('\n')
473 473
474 474 @command('debugcreatestreamclonebundle', [], 'FILE')
475 475 def debugcreatestreamclonebundle(ui, repo, fname):
476 476 """create a stream clone bundle file
477 477
478 478 Stream bundles are special bundles that are essentially archives of
479 479 revlog files. They are commonly used for cloning very quickly.
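
For example, "hg debugcreatestreamclonebundle bundle.hg" writes such a
bundle (the file name is arbitrary), and "hg debugapplystreamclonebundle
bundle.hg" can later apply it to an empty repository.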
480 480 """
481 481 # TODO we may want to turn this into an abort when this functionality
482 482 # is moved into `hg bundle`.
483 483 if phases.hassecret(repo):
484 484 ui.warn(_('(warning: stream clone bundle will contain secret '
485 485 'revisions)\n'))
486 486
487 487 requirements, gen = streamclone.generatebundlev1(repo)
488 488 changegroup.writechunks(ui, gen, fname)
489 489
490 490 ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
491 491
492 492 @command('debugdag',
493 493 [('t', 'tags', None, _('use tags as labels')),
494 494 ('b', 'branches', None, _('annotate with branch names')),
495 495 ('', 'dots', None, _('use dots for runs')),
496 496 ('s', 'spaces', None, _('separate elements by spaces'))],
497 497 _('[OPTION]... [FILE [REV]...]'),
498 498 optionalrepo=True)
499 499 def debugdag(ui, repo, file_=None, *revs, **opts):
500 500 """format the changelog or an index DAG as a concise textual description
501 501
502 502 If you pass a revlog index, the revlog's DAG is emitted. If you list
503 503 revision numbers, they get labeled in the output as rN.
504 504
505 505 Otherwise, the changelog DAG of the current repo is emitted.
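
The output uses the same concise DAG text language that
:hg:`debugbuilddag` accepts as input.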
506 506 """
507 507 spaces = opts.get(r'spaces')
508 508 dots = opts.get(r'dots')
509 509 if file_:
510 510 rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
511 511 file_)
512 512 revs = set((int(r) for r in revs))
513 513 def events():
514 514 for r in rlog:
515 515 yield 'n', (r, list(p for p in rlog.parentrevs(r)
516 516 if p != -1))
517 517 if r in revs:
518 518 yield 'l', (r, "r%i" % r)
519 519 elif repo:
520 520 cl = repo.changelog
521 521 tags = opts.get(r'tags')
522 522 branches = opts.get(r'branches')
523 523 if tags:
524 524 labels = {}
525 525 for l, n in repo.tags().items():
526 526 labels.setdefault(cl.rev(n), []).append(l)
527 527 def events():
528 528 b = "default"
529 529 for r in cl:
530 530 if branches:
531 531 newb = cl.read(cl.node(r))[5]['branch']
532 532 if newb != b:
533 533 yield 'a', newb
534 534 b = newb
535 535 yield 'n', (r, list(p for p in cl.parentrevs(r)
536 536 if p != -1))
537 537 if tags:
538 538 ls = labels.get(r)
539 539 if ls:
540 540 for l in ls:
541 541 yield 'l', (r, l)
542 542 else:
543 543 raise error.Abort(_('need repo for changelog dag'))
544 544
545 545 for line in dagparser.dagtextlines(events(),
546 546 addspaces=spaces,
547 547 wraplabels=True,
548 548 wrapannotations=True,
549 549 wrapnonlinear=dots,
550 550 usedots=dots,
551 551 maxlinewidth=70):
552 552 ui.write(line)
553 553 ui.write("\n")
554 554
555 555 @command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
556 556 def debugdata(ui, repo, file_, rev=None, **opts):
557 557 """dump the contents of a data file revision"""
558 558 opts = pycompat.byteskwargs(opts)
559 559 if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
560 560 if rev is not None:
561 561 raise error.CommandError('debugdata', _('invalid arguments'))
562 562 file_, rev = None, file_
563 563 elif rev is None:
564 564 raise error.CommandError('debugdata', _('invalid arguments'))
565 565 r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
566 566 try:
567 567 ui.write(r.revision(r.lookup(rev), raw=True))
568 568 except KeyError:
569 569 raise error.Abort(_('invalid revision identifier %s') % rev)
570 570
571 571 @command('debugdate',
572 572 [('e', 'extended', None, _('try extended date formats'))],
573 573 _('[-e] DATE [RANGE]'),
574 574 norepo=True, optionalrepo=True)
575 575 def debugdate(ui, date, range=None, **opts):
576 576 """parse and display a date"""
577 577 if opts[r"extended"]:
578 578 d = dateutil.parsedate(date, util.extendeddateformats)
579 579 else:
580 580 d = dateutil.parsedate(date)
581 581 ui.write(("internal: %d %d\n") % d)
582 582 ui.write(("standard: %s\n") % dateutil.datestr(d))
583 583 if range:
584 584 m = dateutil.matchdate(range)
585 585 ui.write(("match: %s\n") % m(d[0]))
586 586
587 587 @command('debugdeltachain',
588 588 cmdutil.debugrevlogopts + cmdutil.formatteropts,
589 589 _('-c|-m|FILE'),
590 590 optionalrepo=True)
591 591 def debugdeltachain(ui, repo, file_=None, **opts):
592 592 """dump information about delta chains in a revlog
593 593
594 594 Output can be templatized. Available template keywords are:
595 595
596 596 :``rev``: revision number
597 597 :``chainid``: delta chain identifier (numbered by unique base)
598 598 :``chainlen``: delta chain length to this revision
599 599 :``prevrev``: previous revision in delta chain
600 600 :``deltatype``: role of delta / how it was computed
601 601 :``compsize``: compressed size of revision
602 602 :``uncompsize``: uncompressed size of revision
603 603 :``chainsize``: total size of compressed revisions in chain
604 604 :``chainratio``: total chain size divided by uncompressed revision size
605 605 (new delta chains typically start at ratio 2.00)
606 606 :``lindist``: linear distance from base revision in delta chain to end
607 607 of this revision
608 608 :``extradist``: total size of revisions not part of this delta chain from
609 609 base of delta chain to end of this revision; a measurement
610 610 of how much extra data we need to read/seek across to read
611 611 the delta chain for this revision
612 612 :``extraratio``: extradist divided by chainsize; another representation of
613 613 how much unrelated data is needed to load this delta chain
614 614
615 615 If the repository is configured to use sparse reads, additional keywords
616 616 are available:
617 617
618 618 :``readsize``: total size of data read from the disk for a revision
619 619 (sum of the sizes of all the blocks)
620 620 :``largestblock``: size of the largest block of data read from the disk
621 621 :``readdensity``: density of useful bytes in the data read from the disk
622 622 :``srchunks``: in how many data hunks the whole revision would be read
623 623
624 624 Sparse reads can be enabled with experimental.sparse-read = True
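
A sketch of typical use, combining the -m revlog selector with a
template over the keywords listed above::

  hg debugdeltachain -m -T '{rev} {chainid} {chainlen}\n'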
625 625 """
626 626 opts = pycompat.byteskwargs(opts)
627 627 r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
628 628 index = r.index
629 629 generaldelta = r.version & revlog.FLAG_GENERALDELTA
630 630 withsparseread = getattr(r, '_withsparseread', False)
631 631
632 632 def revinfo(rev):
633 633 e = index[rev]
634 634 compsize = e[1]
635 635 uncompsize = e[2]
636 636 chainsize = 0
637 637
638 638 if generaldelta:
639 639 if e[3] == e[5]:
640 640 deltatype = 'p1'
641 641 elif e[3] == e[6]:
642 642 deltatype = 'p2'
643 643 elif e[3] == rev - 1:
644 644 deltatype = 'prev'
645 645 elif e[3] == rev:
646 646 deltatype = 'base'
647 647 else:
648 648 deltatype = 'other'
649 649 else:
650 650 if e[3] == rev:
651 651 deltatype = 'base'
652 652 else:
653 653 deltatype = 'prev'
654 654
655 655 chain = r._deltachain(rev)[0]
656 656 for iterrev in chain:
657 657 e = index[iterrev]
658 658 chainsize += e[1]
659 659
660 660 return compsize, uncompsize, deltatype, chain, chainsize
661 661
662 662 fm = ui.formatter('debugdeltachain', opts)
663 663
664 664 fm.plain(' rev chain# chainlen prev delta '
665 665 'size rawsize chainsize ratio lindist extradist '
666 666 'extraratio')
667 667 if withsparseread:
668 668 fm.plain(' readsize largestblk rddensity srchunks')
669 669 fm.plain('\n')
670 670
671 671 chainbases = {}
672 672 for rev in r:
673 673 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
674 674 chainbase = chain[0]
675 675 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
676 676 start = r.start
677 677 length = r.length
678 678 basestart = start(chainbase)
679 679 revstart = start(rev)
680 680 lineardist = revstart + comp - basestart
681 681 extradist = lineardist - chainsize
682 682 try:
683 683 prevrev = chain[-2]
684 684 except IndexError:
685 685 prevrev = -1
686 686
687 687 chainratio = float(chainsize) / float(uncomp)
688 688 extraratio = float(extradist) / float(chainsize)
689 689
690 690 fm.startitem()
691 691 fm.write('rev chainid chainlen prevrev deltatype compsize '
692 692 'uncompsize chainsize chainratio lindist extradist '
693 693 'extraratio',
694 694 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
695 695 rev, chainid, len(chain), prevrev, deltatype, comp,
696 696 uncomp, chainsize, chainratio, lineardist, extradist,
697 697 extraratio,
698 698 rev=rev, chainid=chainid, chainlen=len(chain),
699 699 prevrev=prevrev, deltatype=deltatype, compsize=comp,
700 700 uncompsize=uncomp, chainsize=chainsize,
701 701 chainratio=chainratio, lindist=lineardist,
702 702 extradist=extradist, extraratio=extraratio)
703 703 if withsparseread:
704 704 readsize = 0
705 705 largestblock = 0
706 706 srchunks = 0
707 707
708 708 for revschunk in revlog._slicechunk(r, chain):
709 709 srchunks += 1
710 710 blkend = start(revschunk[-1]) + length(revschunk[-1])
711 711 blksize = blkend - start(revschunk[0])
712 712
713 713 readsize += blksize
714 714 if largestblock < blksize:
715 715 largestblock = blksize
716 716
717 717 readdensity = float(chainsize) / float(readsize)
718 718
719 719 fm.write('readsize largestblock readdensity srchunks',
720 720 ' %10d %10d %9.5f %8d',
721 721 readsize, largestblock, readdensity, srchunks,
722 722 readsize=readsize, largestblock=largestblock,
723 723 readdensity=readdensity, srchunks=srchunks)
724 724
725 725 fm.plain('\n')
726 726
727 727 fm.end()
728 728
729 729 @command('debugdirstate|debugstate',
730 730 [('', 'nodates', None, _('do not display the saved mtime')),
731 731 ('', 'datesort', None, _('sort by saved mtime'))],
732 732 _('[OPTION]...'))
733 733 def debugstate(ui, repo, **opts):
734 734 """show the contents of the current dirstate"""
735 735
736 736 nodates = opts.get(r'nodates')
737 737 datesort = opts.get(r'datesort')
738 738
739 739 timestr = ""
740 740 if datesort:
741 741 keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
742 742 else:
743 743 keyfunc = None # sort by filename
744 744 for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
745 745 if ent[3] == -1:
746 746 timestr = 'unset '
747 747 elif nodates:
748 748 timestr = 'set '
749 749 else:
750 750 timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
751 751 time.localtime(ent[3]))
752 752 timestr = encoding.strtolocal(timestr)
753 753 if ent[1] & 0o20000:
754 754 mode = 'lnk'
755 755 else:
756 756 mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
757 757 ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
758 758 for f in repo.dirstate.copies():
759 759 ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
760 760
761 761 @command('debugdiscovery',
762 762 [('', 'old', None, _('use old-style discovery')),
763 763 ('', 'nonheads', None,
764 764 _('use old-style discovery with non-heads included')),
765 765 ('', 'rev', [], 'restrict discovery to this set of revs'),
766 766 ] + cmdutil.remoteopts,
767 767 _('[--rev REV] [OTHER]'))
768 768 def debugdiscovery(ui, repo, remoteurl="default", **opts):
769 769 """runs the changeset discovery protocol in isolation"""
770 770 opts = pycompat.byteskwargs(opts)
771 771 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
772 772 remote = hg.peer(repo, opts, remoteurl)
773 773 ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))
774 774
775 775 # make sure tests are repeatable
776 776 random.seed(12323)
777 777
778 778 def doit(pushedrevs, remoteheads, remote=remote):
779 779 if opts.get('old'):
780 780 if not util.safehasattr(remote, 'branches'):
781 781 # enable in-client legacy support
782 782 remote = localrepo.locallegacypeer(remote.local())
783 783 common, _in, hds = treediscovery.findcommonincoming(repo, remote,
784 784 force=True)
785 785 common = set(common)
786 786 if not opts.get('nonheads'):
787 787 ui.write(("unpruned common: %s\n") %
788 788 " ".join(sorted(short(n) for n in common)))
789 789 dag = dagutil.revlogdag(repo.changelog)
790 790 all = dag.ancestorset(dag.internalizeall(common))
791 791 common = dag.externalizeall(dag.headsetofconnecteds(all))
792 792 else:
793 793 nodes = None
794 794 if pushedrevs:
795 795 revs = scmutil.revrange(repo, pushedrevs)
796 796 nodes = [repo[r].node() for r in revs]
797 797 common, any, hds = setdiscovery.findcommonheads(ui, repo, remote,
798 798 ancestorsof=nodes)
799 799 common = set(common)
800 800 rheads = set(hds)
801 801 lheads = set(repo.heads())
802 802 ui.write(("common heads: %s\n") %
803 803 " ".join(sorted(short(n) for n in common)))
804 804 if lheads <= common:
805 805 ui.write(("local is subset\n"))
806 806 elif rheads <= common:
807 807 ui.write(("remote is subset\n"))
808 808
809 809 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
810 810 localrevs = opts['rev']
811 811 doit(localrevs, remoterevs)
812 812
813 813 _chunksize = 4 << 10
814 814
815 815 @command('debugdownload',
816 816 [
817 817 ('o', 'output', '', _('path')),
818 818 ],
819 819 optionalrepo=True)
820 820 def debugdownload(ui, repo, url, output=None, **opts):
821 821 """download a resource using Mercurial logic and config
822 822 """
823 823 fh = urlmod.open(ui, url, output)
824 824
825 825 dest = ui
826 826 if output:
827 827 dest = open(output, "wb", _chunksize)
828 828 try:
829 829 data = fh.read(_chunksize)
830 830 while data:
831 831 dest.write(data)
832 832 data = fh.read(_chunksize)
833 833 finally:
834 834 if output:
835 835 dest.close()
836 836
837 837 @command('debugextensions', cmdutil.formatteropts, [], norepo=True)
838 838 def debugextensions(ui, **opts):
839 839 '''show information about active extensions'''
840 840 opts = pycompat.byteskwargs(opts)
841 841 exts = extensions.extensions(ui)
842 842 hgver = util.version()
843 843 fm = ui.formatter('debugextensions', opts)
844 844 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
845 845 isinternal = extensions.ismoduleinternal(extmod)
846 846 extsource = pycompat.fsencode(extmod.__file__)
847 847 if isinternal:
848 848 exttestedwith = [] # never expose magic string to users
849 849 else:
850 850 exttestedwith = getattr(extmod, 'testedwith', '').split()
851 851 extbuglink = getattr(extmod, 'buglink', None)
852 852
853 853 fm.startitem()
854 854
855 855 if ui.quiet or ui.verbose:
856 856 fm.write('name', '%s\n', extname)
857 857 else:
858 858 fm.write('name', '%s', extname)
859 859 if isinternal or hgver in exttestedwith:
860 860 fm.plain('\n')
861 861 elif not exttestedwith:
862 862 fm.plain(_(' (untested!)\n'))
863 863 else:
864 864 lasttestedversion = exttestedwith[-1]
865 865 fm.plain(' (%s!)\n' % lasttestedversion)
866 866
867 867 fm.condwrite(ui.verbose and extsource, 'source',
868 868 _(' location: %s\n'), extsource or "")
869 869
870 870 if ui.verbose:
871 871 fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal])
872 872 fm.data(bundled=isinternal)
873 873
874 874 fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
875 875 _(' tested with: %s\n'),
876 876 fm.formatlist(exttestedwith, name='ver'))
877 877
878 878 fm.condwrite(ui.verbose and extbuglink, 'buglink',
879 879 _(' bug reporting: %s\n'), extbuglink or "")
880 880
881 881 fm.end()
882 882
883 883 @command('debugfileset',
884 884 [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
885 885 _('[-r REV] FILESPEC'))
886 886 def debugfileset(ui, repo, expr, **opts):
887 887 '''parse and apply a fileset specification'''
888 888 ctx = scmutil.revsingle(repo, opts.get(r'rev'), None)
889 889 if ui.verbose:
890 890 tree = fileset.parse(expr)
891 891 ui.note(fileset.prettyformat(tree), "\n")
892 892
893 893 for f in ctx.getfileset(expr):
894 894 ui.write("%s\n" % f)
895 895
896 896 @command('debugformat',
897 897 [] + cmdutil.formatteropts,
898 898 _(''))
899 899 def debugformat(ui, repo, **opts):
900 900 """display format information about the current repository
901 901
902 902 Use --verbose to get extra information about the current config value
903 903 and the Mercurial default."""
904 904 opts = pycompat.byteskwargs(opts)
905 905 maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
906 906 maxvariantlength = max(len('format-variant'), maxvariantlength)
907 907
908 908 def makeformatname(name):
909 909 return '%s:' + (' ' * (maxvariantlength - len(name)))
910 910
911 911 fm = ui.formatter('debugformat', opts)
912 912 if fm.isplain():
913 913 def formatvalue(value):
914 914 if util.safehasattr(value, 'startswith'):
915 915 return value
916 916 if value:
917 917 return 'yes'
918 918 else:
919 919 return 'no'
920 920 else:
921 921 formatvalue = pycompat.identity
922 922
923 923 fm.plain('format-variant')
924 924 fm.plain(' ' * (maxvariantlength - len('format-variant')))
925 925 fm.plain(' repo')
926 926 if ui.verbose:
927 927 fm.plain(' config default')
928 928 fm.plain('\n')
929 929 for fv in upgrade.allformatvariant:
930 930 fm.startitem()
931 931 repovalue = fv.fromrepo(repo)
932 932 configvalue = fv.fromconfig(repo)
933 933
934 934 if repovalue != configvalue:
935 935 namelabel = 'formatvariant.name.mismatchconfig'
936 936 repolabel = 'formatvariant.repo.mismatchconfig'
937 937 elif repovalue != fv.default:
938 938 namelabel = 'formatvariant.name.mismatchdefault'
939 939 repolabel = 'formatvariant.repo.mismatchdefault'
940 940 else:
941 941 namelabel = 'formatvariant.name.uptodate'
942 942 repolabel = 'formatvariant.repo.uptodate'
943 943
944 944 fm.write('name', makeformatname(fv.name), fv.name,
945 945 label=namelabel)
946 946 fm.write('repo', ' %3s', formatvalue(repovalue),
947 947 label=repolabel)
948 948 if fv.default != configvalue:
949 949 configlabel = 'formatvariant.config.special'
950 950 else:
951 951 configlabel = 'formatvariant.config.default'
952 952 fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
953 953 label=configlabel)
954 954 fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
955 955 label='formatvariant.default')
956 956 fm.plain('\n')
957 957 fm.end()
958 958
959 959 @command('debugfsinfo', [], _('[PATH]'), norepo=True)
960 960 def debugfsinfo(ui, path="."):
961 961 """show information detected about current filesystem"""
962 962 ui.write(('path: %s\n') % path)
963 963 ui.write(('mounted on: %s\n') % (util.getfsmountpoint(path) or '(unknown)'))
964 964 ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
965 965 ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
966 966 ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
967 967 ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
968 968 casesensitive = '(unknown)'
969 969 try:
970 970 with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f:
971 971 casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
972 972 except OSError:
973 973 pass
974 974 ui.write(('case-sensitive: %s\n') % casesensitive)
975 975
976 976 @command('debuggetbundle',
977 977 [('H', 'head', [], _('id of head node'), _('ID')),
978 978 ('C', 'common', [], _('id of common node'), _('ID')),
979 979 ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
980 980 _('REPO FILE [-H|-C ID]...'),
981 981 norepo=True)
982 982 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
983 983 """retrieves a bundle from a repo
984 984
985 985 Every ID must be a full-length hex node id string. Saves the bundle to the
986 986 given file.
987 987 """
988 988 opts = pycompat.byteskwargs(opts)
989 989 repo = hg.peer(ui, opts, repopath)
990 990 if not repo.capable('getbundle'):
991 991 raise error.Abort("getbundle() not supported by target repository")
992 992 args = {}
993 993 if common:
994 994 args[r'common'] = [bin(s) for s in common]
995 995 if head:
996 996 args[r'heads'] = [bin(s) for s in head]
997 997 # TODO: get desired bundlecaps from command line.
998 998 args[r'bundlecaps'] = None
999 999 bundle = repo.getbundle('debug', **args)
1000 1000
1001 1001 bundletype = opts.get('type', 'bzip2').lower()
1002 1002 btypes = {'none': 'HG10UN',
1003 1003 'bzip2': 'HG10BZ',
1004 1004 'gzip': 'HG10GZ',
1005 1005 'bundle2': 'HG20'}
1006 1006 bundletype = btypes.get(bundletype)
1007 1007 if bundletype not in bundle2.bundletypes:
1008 1008 raise error.Abort(_('unknown bundle type specified with --type'))
1009 1009 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1010 1010
1011 1011 @command('debugignore', [], '[FILE]')
1012 1012 def debugignore(ui, repo, *files, **opts):
1013 1013 """display the combined ignore pattern and information about ignored files
1014 1014
1015 1015 With no argument display the combined ignore pattern.
1016 1016
1017 1017 Given space separated file names, shows whether each file is ignored
1018 1018 and, if so, shows the ignore rule (file and line number) that matched it.
1019 1019 """
1020 1020 ignore = repo.dirstate._ignore
1021 1021 if not files:
1022 1022 # Show all the patterns
1023 1023 ui.write("%s\n" % pycompat.byterepr(ignore))
1024 1024 else:
1025 1025 m = scmutil.match(repo[None], pats=files)
1026 1026 for f in m.files():
1027 1027 nf = util.normpath(f)
1028 1028 ignored = None
1029 1029 ignoredata = None
1030 1030 if nf != '.':
1031 1031 if ignore(nf):
1032 1032 ignored = nf
1033 1033 ignoredata = repo.dirstate._ignorefileandline(nf)
1034 1034 else:
1035 1035 for p in util.finddirs(nf):
1036 1036 if ignore(p):
1037 1037 ignored = p
1038 1038 ignoredata = repo.dirstate._ignorefileandline(p)
1039 1039 break
1040 1040 if ignored:
1041 1041 if ignored == nf:
1042 1042 ui.write(_("%s is ignored\n") % m.uipath(f))
1043 1043 else:
1044 1044 ui.write(_("%s is ignored because of "
1045 1045 "containing folder %s\n")
1046 1046 % (m.uipath(f), ignored))
1047 1047 ignorefile, lineno, line = ignoredata
1048 1048 ui.write(_("(ignore rule in %s, line %d: '%s')\n")
1049 1049 % (ignorefile, lineno, line))
1050 1050 else:
1051 1051 ui.write(_("%s is not ignored\n") % m.uipath(f))
1052 1052
1053 1053 @command('debugindex', cmdutil.debugrevlogopts +
1054 1054 [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
1055 1055 _('[-f FORMAT] -c|-m|FILE'),
1056 1056 optionalrepo=True)
1057 1057 def debugindex(ui, repo, file_=None, **opts):
1058 1058 """dump the contents of an index file"""
1059 1059 opts = pycompat.byteskwargs(opts)
1060 1060 r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
1061 1061 format = opts.get('format', 0)
1062 1062 if format not in (0, 1):
1063 1063 raise error.Abort(_("unknown format %d") % format)
1064 1064
1065 1065 if ui.debugflag:
1066 1066 shortfn = hex
1067 1067 else:
1068 1068 shortfn = short
1069 1069
1070 1070 # There might not be anything in r, so have a sane default
1071 1071 idlen = 12
1072 1072 for i in r:
1073 1073 idlen = len(shortfn(r.node(i)))
1074 1074 break
1075 1075
1076 1076 if format == 0:
1077 1077 if ui.verbose:
1078 1078 ui.write((" rev offset length linkrev"
1079 1079 " %s %s p2\n") % ("nodeid".ljust(idlen),
1080 1080 "p1".ljust(idlen)))
1081 1081 else:
1082 1082 ui.write((" rev linkrev %s %s p2\n") % (
1083 1083 "nodeid".ljust(idlen), "p1".ljust(idlen)))
1084 1084 elif format == 1:
1085 1085 if ui.verbose:
1086 1086 ui.write((" rev flag offset length size link p1"
1087 1087 " p2 %s\n") % "nodeid".rjust(idlen))
1088 1088 else:
1089 1089 ui.write((" rev flag size link p1 p2 %s\n") %
1090 1090 "nodeid".rjust(idlen))
1091 1091
1092 1092 for i in r:
1093 1093 node = r.node(i)
1094 1094 if format == 0:
1095 1095 try:
1096 1096 pp = r.parents(node)
1097 1097 except Exception:
1098 1098 pp = [nullid, nullid]
1099 1099 if ui.verbose:
1100 1100 ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
1101 1101 i, r.start(i), r.length(i), r.linkrev(i),
1102 1102 shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
1103 1103 else:
1104 1104 ui.write("% 6d % 7d %s %s %s\n" % (
1105 1105 i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
1106 1106 shortfn(pp[1])))
1107 1107 elif format == 1:
1108 1108 pr = r.parentrevs(i)
1109 1109 if ui.verbose:
1110 1110 ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
1111 1111 i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
1112 1112 r.linkrev(i), pr[0], pr[1], shortfn(node)))
1113 1113 else:
1114 1114 ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
1115 1115 i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
1116 1116 shortfn(node)))
1117 1117
1118 1118 @command('debugindexdot', cmdutil.debugrevlogopts,
1119 1119 _('-c|-m|FILE'), optionalrepo=True)
1120 1120 def debugindexdot(ui, repo, file_=None, **opts):
1121 1121 """dump an index DAG as a graphviz dot file"""
1122 1122 opts = pycompat.byteskwargs(opts)
1123 1123 r = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
1124 1124 ui.write(("digraph G {\n"))
1125 1125 for i in r:
1126 1126 node = r.node(i)
1127 1127 pp = r.parents(node)
1128 1128 ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
1129 1129 if pp[1] != nullid:
1130 1130 ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
1131 1131 ui.write("}\n")
1132 1132
1133 1133 @command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
1134 1134 def debuginstall(ui, **opts):
1135 1135 '''test Mercurial installation
1136 1136
1137 1137 Returns 0 on success.
1138 1138 '''
1139 1139 opts = pycompat.byteskwargs(opts)
1140 1140
1141 1141 def writetemp(contents):
1142 1142 (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
1143 1143 f = os.fdopen(fd, r"wb")
1144 1144 f.write(contents)
1145 1145 f.close()
1146 1146 return name
1147 1147
1148 1148 problems = 0
1149 1149
1150 1150 fm = ui.formatter('debuginstall', opts)
1151 1151 fm.startitem()
1152 1152
1153 1153 # encoding
1154 1154 fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
1155 1155 err = None
1156 1156 try:
1157 1157 codecs.lookup(pycompat.sysstr(encoding.encoding))
1158 1158 except LookupError as inst:
1159 1159 err = stringutil.forcebytestr(inst)
1160 1160 problems += 1
1161 1161 fm.condwrite(err, 'encodingerror', _(" %s\n"
1162 1162 " (check that your locale is properly set)\n"), err)
1163 1163
1164 1164 # Python
1165 1165 fm.write('pythonexe', _("checking Python executable (%s)\n"),
1166 1166 pycompat.sysexecutable)
1167 1167 fm.write('pythonver', _("checking Python version (%s)\n"),
1168 1168 ("%d.%d.%d" % sys.version_info[:3]))
1169 1169 fm.write('pythonlib', _("checking Python lib (%s)...\n"),
1170 1170 os.path.dirname(pycompat.fsencode(os.__file__)))
1171 1171
1172 1172 security = set(sslutil.supportedprotocols)
1173 1173 if sslutil.hassni:
1174 1174 security.add('sni')
1175 1175
1176 1176 fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
1177 1177 fm.formatlist(sorted(security), name='protocol',
1178 1178 fmt='%s', sep=','))
1179 1179
1180 1180 # These are warnings, not errors. So don't increment problem count. This
1181 1181 # may change in the future.
1182 1182 if 'tls1.2' not in security:
1183 1183 fm.plain(_(' TLS 1.2 not supported by Python install; '
1184 1184 'network connections lack modern security\n'))
1185 1185 if 'sni' not in security:
1186 1186 fm.plain(_(' SNI not supported by Python install; may have '
1187 1187 'connectivity issues with some servers\n'))
1188 1188
1189 1189 # TODO print CA cert info
1190 1190
1191 1191 # hg version
1192 1192 hgver = util.version()
1193 1193 fm.write('hgver', _("checking Mercurial version (%s)\n"),
1194 1194 hgver.split('+')[0])
1195 1195 fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
1196 1196 '+'.join(hgver.split('+')[1:]))
1197 1197
1198 1198 # compiled modules
1199 1199 fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
1200 1200 policy.policy)
1201 1201 fm.write('hgmodules', _("checking installed modules (%s)...\n"),
1202 1202 os.path.dirname(pycompat.fsencode(__file__)))
1203 1203
1204 1204 if policy.policy in ('c', 'allow'):
1205 1205 err = None
1206 1206 try:
1207 1207 from .cext import (
1208 1208 base85,
1209 1209 bdiff,
1210 1210 mpatch,
1211 1211 osutil,
1212 1212 )
1213 1213 dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
1214 1214 except Exception as inst:
1215 1215 err = stringutil.forcebytestr(inst)
1216 1216 problems += 1
1217 1217 fm.condwrite(err, 'extensionserror', " %s\n", err)
1218 1218
1219 1219 compengines = util.compengines._engines.values()
1220 1220 fm.write('compengines', _('checking registered compression engines (%s)\n'),
1221 1221 fm.formatlist(sorted(e.name() for e in compengines),
1222 1222 name='compengine', fmt='%s', sep=', '))
1223 1223 fm.write('compenginesavail', _('checking available compression engines '
1224 1224 '(%s)\n'),
1225 1225 fm.formatlist(sorted(e.name() for e in compengines
1226 1226 if e.available()),
1227 1227 name='compengine', fmt='%s', sep=', '))
1228 1228 wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
1229 1229 fm.write('compenginesserver', _('checking available compression engines '
1230 1230 'for wire protocol (%s)\n'),
1231 1231 fm.formatlist([e.name() for e in wirecompengines
1232 1232 if e.wireprotosupport()],
1233 1233 name='compengine', fmt='%s', sep=', '))
1234 1234 re2 = 'missing'
1235 1235 if util._re2:
1236 1236 re2 = 'available'
1237 1237 fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
1238 1238 fm.data(re2=bool(util._re2))
1239 1239
1240 1240 # templates
1241 1241 p = templater.templatepaths()
1242 1242 fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
1243 1243 fm.condwrite(not p, '', _(" no template directories found\n"))
1244 1244 if p:
1245 1245 m = templater.templatepath("map-cmdline.default")
1246 1246 if m:
1247 1247 # template found, check if it is working
1248 1248 err = None
1249 1249 try:
1250 1250 templater.templater.frommapfile(m)
1251 1251 except Exception as inst:
1252 1252 err = stringutil.forcebytestr(inst)
1253 1253 p = None
1254 1254 fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
1255 1255 else:
1256 1256 p = None
1257 1257 fm.condwrite(p, 'defaulttemplate',
1258 1258 _("checking default template (%s)\n"), m)
1259 1259 fm.condwrite(not m, 'defaulttemplatenotfound',
1260 1260 _(" template '%s' not found\n"), "default")
1261 1261 if not p:
1262 1262 problems += 1
1263 1263 fm.condwrite(not p, '',
1264 1264 _(" (templates seem to have been installed incorrectly)\n"))
1265 1265
1266 1266 # editor
1267 1267 editor = ui.geteditor()
1268 1268 editor = util.expandpath(editor)
1269 1269 editorbin = procutil.shellsplit(editor)[0]
1270 1270 fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
1271 1271 cmdpath = procutil.findexe(editorbin)
1272 1272 fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
1273 1273 _(" No commit editor set and can't find %s in PATH\n"
1274 1274 " (specify a commit editor in your configuration"
1275 1275 " file)\n"), not cmdpath and editor == 'vi' and editorbin)
1276 1276 fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
1277 1277 _(" Can't find editor '%s' in PATH\n"
1278 1278 " (specify a commit editor in your configuration"
1279 1279 " file)\n"), not cmdpath and editorbin)
1280 1280 if not cmdpath and editor != 'vi':
1281 1281 problems += 1
1282 1282
1283 1283 # check username
1284 1284 username = None
1285 1285 err = None
1286 1286 try:
1287 1287 username = ui.username()
1288 1288 except error.Abort as e:
1289 1289 err = stringutil.forcebytestr(e)
1290 1290 problems += 1
1291 1291
1292 1292 fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
1293 1293 fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
1294 1294 " (specify a username in your configuration file)\n"), err)
1295 1295
1296 1296 fm.condwrite(not problems, '',
1297 1297 _("no problems detected\n"))
1298 1298 if not problems:
1299 1299 fm.data(problems=problems)
1300 1300 fm.condwrite(problems, 'problems',
1301 1301 _("%d problems detected,"
1302 1302 " please check your install!\n"), problems)
1303 1303 fm.end()
1304 1304
1305 1305 return problems
1306 1306
1307 1307 @command('debugknown', [], _('REPO ID...'), norepo=True)
1308 1308 def debugknown(ui, repopath, *ids, **opts):
1309 1309 """test whether node ids are known to a repo
1310 1310
1311 1311 Every ID must be a full-length hex node id string. Returns a list of 0s
1312 1312 and 1s indicating unknown/known.
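
For example, an output of "10" means the first queried ID is known to
the repository and the second is not.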
1313 1313 """
1314 1314 opts = pycompat.byteskwargs(opts)
1315 1315 repo = hg.peer(ui, opts, repopath)
1316 1316 if not repo.capable('known'):
1317 1317 raise error.Abort("known() not supported by target repository")
1318 1318 flags = repo.known([bin(s) for s in ids])
1319 1319 ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
1320 1320
1321 1321 @command('debuglabelcomplete', [], _('LABEL...'))
1322 1322 def debuglabelcomplete(ui, repo, *args):
1323 1323 '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
1324 1324 debugnamecomplete(ui, repo, *args)
1325 1325
1326 1326 @command('debuglocks',
1327 1327 [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
1328 1328 ('W', 'force-wlock', None,
1329 1329 _('free the working state lock (DANGEROUS)')),
1330 1330 ('s', 'set-lock', None, _('set the store lock until stopped')),
1331 1331 ('S', 'set-wlock', None,
1332 1332 _('set the working state lock until stopped'))],
1333 1333 _('[OPTION]...'))
1334 1334 def debuglocks(ui, repo, **opts):
1335 1335 """show or modify state of locks
1336 1336
1337 1337 By default, this command will show which locks are held. This
1338 1338 includes the user and process holding the lock, the amount of time
1339 1339 the lock has been held, and the machine name where the process is
1340 1340 running if it's not local.
1341 1341
1342 1342 Locks protect the integrity of Mercurial's data, so should be
1343 1343 treated with care. System crashes or other interruptions may cause
1344 1344 locks to not be properly released, though Mercurial will usually
1345 1345 detect and remove such stale locks automatically.
1346 1346
1347 1347 However, detecting stale locks may not always be possible (for
1348 1348 instance, on a shared filesystem). Removing locks may also be
1349 1349 blocked by filesystem permissions.
1350 1350
1351 1351 Setting a lock will prevent other commands from changing the data.
1352 1352 The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
1353 1353 The set locks are removed when the command exits.
1354 1354
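For example, "hg debuglocks" reports the current lock states, while
"hg debuglocks --set-wlock" acquires the working state lock and holds it
until interrupted.
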
1355 1355 Returns 0 if no locks are held.
1356 1356
1357 1357 """
1358 1358
1359 1359 if opts.get(r'force_lock'):
1360 1360 repo.svfs.unlink('lock')
1361 1361 if opts.get(r'force_wlock'):
1362 1362 repo.vfs.unlink('wlock')
1363 1363 if opts.get(r'force_lock') or opts.get(r'force_wlock'):
1364 1364 return 0
1365 1365
1366 1366 locks = []
1367 1367 try:
1368 1368 if opts.get(r'set_wlock'):
1369 1369 try:
1370 1370 locks.append(repo.wlock(False))
1371 1371 except error.LockHeld:
1372 1372 raise error.Abort(_('wlock is already held'))
1373 1373 if opts.get(r'set_lock'):
1374 1374 try:
1375 1375 locks.append(repo.lock(False))
1376 1376 except error.LockHeld:
1377 1377 raise error.Abort(_('lock is already held'))
1378 1378 if len(locks):
1379 1379 ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
1380 1380 return 0
1381 1381 finally:
1382 1382 release(*locks)
1383 1383
1384 1384 now = time.time()
1385 1385 held = 0
1386 1386
1387 1387 def report(vfs, name, method):
1388 1388 # this causes stale locks to get reaped for more accurate reporting
1389 1389 try:
1390 1390 l = method(False)
1391 1391 except error.LockHeld:
1392 1392 l = None
1393 1393
1394 1394 if l:
1395 1395 l.release()
1396 1396 else:
1397 1397 try:
1398 1398 st = vfs.lstat(name)
1399 1399 age = now - st[stat.ST_MTIME]
1400 1400 user = util.username(st.st_uid)
1401 1401 locker = vfs.readlock(name)
1402 1402 if ":" in locker:
1403 1403 host, pid = locker.split(':')
1404 1404 if host == socket.gethostname():
1405 1405 locker = 'user %s, process %s' % (user, pid)
1406 1406 else:
1407 1407 locker = 'user %s, process %s, host %s' \
1408 1408 % (user, pid, host)
1409 1409 ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
1410 1410 return 1
1411 1411 except OSError as e:
1412 1412 if e.errno != errno.ENOENT:
1413 1413 raise
1414 1414
1415 1415 ui.write(("%-6s free\n") % (name + ":"))
1416 1416 return 0
1417 1417
1418 1418 held += report(repo.svfs, "lock", repo.lock)
1419 1419 held += report(repo.vfs, "wlock", repo.wlock)
1420 1420
1421 1421 return held
1422 1422
1423 1423 @command('debugmergestate', [], '')
1424 1424 def debugmergestate(ui, repo, *args):
1425 1425 """print merge state
1426 1426
1427 1427 Use --verbose to print out information about whether v1 or v2 merge state
1428 1428 was chosen."""
1429 1429 def _hashornull(h):
1430 1430 if h == nullhex:
1431 1431 return 'null'
1432 1432 else:
1433 1433 return h
1434 1434
1435 1435 def printrecords(version):
1436 1436 ui.write(('* version %d records\n') % version)
1437 1437 if version == 1:
1438 1438 records = v1records
1439 1439 else:
1440 1440 records = v2records
1441 1441
1442 1442 for rtype, record in records:
1443 1443 # pretty print some record types
1444 1444 if rtype == 'L':
1445 1445 ui.write(('local: %s\n') % record)
1446 1446 elif rtype == 'O':
1447 1447 ui.write(('other: %s\n') % record)
1448 1448 elif rtype == 'm':
1449 1449 driver, mdstate = record.split('\0', 1)
1450 1450 ui.write(('merge driver: %s (state "%s")\n')
1451 1451 % (driver, mdstate))
1452 1452 elif rtype in 'FDC':
1453 1453 r = record.split('\0')
1454 1454 f, state, hash, lfile, afile, anode, ofile = r[0:7]
1455 1455 if version == 1:
1456 1456 onode = 'not stored in v1 format'
1457 1457 flags = r[7]
1458 1458 else:
1459 1459 onode, flags = r[7:9]
1460 1460 ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
1461 1461 % (f, rtype, state, _hashornull(hash)))
1462 1462 ui.write((' local path: %s (flags "%s")\n') % (lfile, flags))
1463 1463 ui.write((' ancestor path: %s (node %s)\n')
1464 1464 % (afile, _hashornull(anode)))
1465 1465 ui.write((' other path: %s (node %s)\n')
1466 1466 % (ofile, _hashornull(onode)))
1467 1467 elif rtype == 'f':
1468 1468 filename, rawextras = record.split('\0', 1)
1469 1469 extras = rawextras.split('\0')
1470 1470 i = 0
1471 1471 extrastrings = []
1472 1472 while i < len(extras):
1473 1473 extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
1474 1474 i += 2
1475 1475
1476 1476 ui.write(('file extras: %s (%s)\n')
1477 1477 % (filename, ', '.join(extrastrings)))
1478 1478 elif rtype == 'l':
1479 1479 labels = record.split('\0', 2)
1480 1480 labels = [l for l in labels if len(l) > 0]
1481 1481 ui.write(('labels:\n'))
1482 1482 ui.write((' local: %s\n' % labels[0]))
1483 1483 ui.write((' other: %s\n' % labels[1]))
1484 1484 if len(labels) > 2:
1485 1485 ui.write((' base: %s\n' % labels[2]))
1486 1486 else:
1487 1487 ui.write(('unrecognized entry: %s\t%s\n')
1488 1488 % (rtype, record.replace('\0', '\t')))
1489 1489
1490 1490 # Avoid mergestate.read() since it may raise an exception for unsupported
1491 1491 # merge state records. We shouldn't be doing this, but this is OK since this
1492 1492 # command is pretty low-level.
1493 1493 ms = mergemod.mergestate(repo)
1494 1494
1495 1495 # sort so that reasonable information is on top
1496 1496 v1records = ms._readrecordsv1()
1497 1497 v2records = ms._readrecordsv2()
1498 1498 order = 'LOml'
1499 1499 def key(r):
1500 1500 idx = order.find(r[0])
1501 1501 if idx == -1:
1502 1502 return (1, r[1])
1503 1503 else:
1504 1504 return (0, idx)
1505 1505 v1records.sort(key=key)
1506 1506 v2records.sort(key=key)
1507 1507
1508 1508 if not v1records and not v2records:
1509 1509 ui.write(('no merge state found\n'))
1510 1510 elif not v2records:
1511 1511 ui.note(('no version 2 merge state\n'))
1512 1512 printrecords(1)
1513 1513 elif ms._v1v2match(v1records, v2records):
1514 1514 ui.note(('v1 and v2 states match: using v2\n'))
1515 1515 printrecords(2)
1516 1516 else:
1517 1517 ui.note(('v1 and v2 states mismatch: using v1\n'))
1518 1518 printrecords(1)
1519 1519 if ui.verbose:
1520 1520 printrecords(2)
1521 1521
1522 1522 @command('debugnamecomplete', [], _('NAME...'))
1523 1523 def debugnamecomplete(ui, repo, *args):
1524 1524 '''complete "names" - tags, open branch names, bookmark names'''
1525 1525
1526 1526 names = set()
1527 1527 # since we previously only listed open branches, we will handle that
1528 1528 # specially (after this for loop)
1529 1529 for name, ns in repo.names.iteritems():
1530 1530 if name != 'branches':
1531 1531 names.update(ns.listnames(repo))
1532 1532 names.update(tag for (tag, heads, tip, closed)
1533 1533 in repo.branchmap().iterbranches() if not closed)
1534 1534 completions = set()
1535 1535 if not args:
1536 1536 args = ['']
1537 1537 for a in args:
1538 1538 completions.update(n for n in names if n.startswith(a))
1539 1539 ui.write('\n'.join(sorted(completions)))
1540 1540 ui.write('\n')
1541 1541
1542 1542 @command('debugobsolete',
1543 1543 [('', 'flags', 0, _('markers flag')),
1544 1544 ('', 'record-parents', False,
1545 1545 _('record parent information for the precursor')),
1546 1546 ('r', 'rev', [], _('display markers relevant to REV')),
1547 1547 ('', 'exclusive', False, _('restrict display to markers only '
1548 1548 'relevant to REV')),
1549 1549 ('', 'index', False, _('display index of the marker')),
1550 1550 ('', 'delete', [], _('delete markers specified by indices')),
1551 1551 ] + cmdutil.commitopts2 + cmdutil.formatteropts,
1552 1552 _('[OBSOLETED [REPLACEMENT ...]]'))
1553 1553 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
1554 1554 """create arbitrary obsolete marker
1555 1555
1556 1556 With no arguments, displays the list of obsolescence markers."""
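# Illustrative usage (placeholders, not real nodes; full 40-hex node
# identifiers are required):
#   hg debugobsolete <obsoleted-node> <successor-node>
# records a marker stating that the first changeset was superseded by
# the second.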
1557 1557
1558 1558 opts = pycompat.byteskwargs(opts)
1559 1559
1560 1560 def parsenodeid(s):
1561 1561 try:
1562 1562 # We do not use revsingle/revrange functions here to accept
1563 1563 # arbitrary node identifiers, possibly not present in the
1564 1564 # local repository.
1565 1565 n = bin(s)
1566 1566 if len(n) != len(nullid):
1567 1567 raise TypeError()
1568 1568 return n
1569 1569 except TypeError:
1570 1570 raise error.Abort('changeset references must be full hexadecimal '
1571 1571 'node identifiers')
1572 1572
1573 1573 if opts.get('delete'):
1574 1574 indices = []
1575 1575 for v in opts.get('delete'):
1576 1576 try:
1577 1577 indices.append(int(v))
1578 1578 except ValueError:
1579 1579 raise error.Abort(_('invalid index value: %r') % v,
1580 1580 hint=_('use integers for indices'))
1581 1581
1582 1582 if repo.currenttransaction():
1583 1583 raise error.Abort(_('cannot delete obsmarkers in the middle '
1584 1584 'of a transaction.'))
1585 1585
1586 1586 with repo.lock():
1587 1587 n = repair.deleteobsmarkers(repo.obsstore, indices)
1588 1588 ui.write(_('deleted %i obsolescence markers\n') % n)
1589 1589
1590 1590 return
1591 1591
1592 1592 if precursor is not None:
1593 1593 if opts['rev']:
1594 1594 raise error.Abort('cannot select revision when creating marker')
1595 1595 metadata = {}
1596 1596 metadata['user'] = opts['user'] or ui.username()
1597 1597 succs = tuple(parsenodeid(succ) for succ in successors)
1598 1598 l = repo.lock()
1599 1599 try:
1600 1600 tr = repo.transaction('debugobsolete')
1601 1601 try:
1602 1602 date = opts.get('date')
1603 1603 if date:
1604 1604 date = dateutil.parsedate(date)
1605 1605 else:
1606 1606 date = None
1607 1607 prec = parsenodeid(precursor)
1608 1608 parents = None
1609 1609 if opts['record_parents']:
1610 1610 if prec not in repo.unfiltered():
1611 1611 raise error.Abort('cannot use --record-parents on '
1612 1612 'unknown changesets')
1613 1613 parents = repo.unfiltered()[prec].parents()
1614 1614 parents = tuple(p.node() for p in parents)
1615 1615 repo.obsstore.create(tr, prec, succs, opts['flags'],
1616 1616 parents=parents, date=date,
1617 1617 metadata=metadata, ui=ui)
1618 1618 tr.close()
1619 1619 except ValueError as exc:
1620 1620 raise error.Abort(_('bad obsmarker input: %s') %
1621 1621 pycompat.bytestr(exc))
1622 1622 finally:
1623 1623 tr.release()
1624 1624 finally:
1625 1625 l.release()
1626 1626 else:
1627 1627 if opts['rev']:
1628 1628 revs = scmutil.revrange(repo, opts['rev'])
1629 1629 nodes = [repo[r].node() for r in revs]
1630 1630 markers = list(obsutil.getmarkers(repo, nodes=nodes,
1631 1631 exclusive=opts['exclusive']))
1632 1632 markers.sort(key=lambda x: x._data)
1633 1633 else:
1634 1634 markers = obsutil.getmarkers(repo)
1635 1635
1636 1636 markerstoiter = markers
1637 1637 isrelevant = lambda m: True
1638 1638 if opts.get('rev') and opts.get('index'):
1639 1639 markerstoiter = obsutil.getmarkers(repo)
1640 1640 markerset = set(markers)
1641 1641 isrelevant = lambda m: m in markerset
1642 1642
1643 1643 fm = ui.formatter('debugobsolete', opts)
1644 1644 for i, m in enumerate(markerstoiter):
1645 1645 if not isrelevant(m):
1646 1646 # marker can be irrelevant when we're iterating over a set
1647 1647 # of markers (markerstoiter) which is bigger than the set
1648 1648 # of markers we want to display (markers). This can happen
1649 1649 # when both --index and --rev options are provided: we need
1650 1650 # to iterate over all of the markers to compute the correct
1651 1651 # indices, but only display the ones that are relevant to
1652 1652 # the --rev value.
1653 1653 continue
1654 1654 fm.startitem()
1655 1655 ind = i if opts.get('index') else None
1656 1656 cmdutil.showmarker(fm, m, index=ind)
1657 1657 fm.end()
1658 1658
1659 1659 @command('debugpathcomplete',
1660 1660 [('f', 'full', None, _('complete an entire path')),
1661 1661 ('n', 'normal', None, _('show only normal files')),
1662 1662 ('a', 'added', None, _('show only added files')),
1663 1663 ('r', 'removed', None, _('show only removed files'))],
1664 1664 _('FILESPEC...'))
1665 1665 def debugpathcomplete(ui, repo, *specs, **opts):
1666 1666 '''complete part or all of a tracked path
1667 1667
1668 1668 This command supports shells that offer path name completion. It
1669 1669 currently completes only files already known to the dirstate.
1670 1670
1671 1671 Completion extends only to the next path segment unless
1672 1672 --full is specified, in which case entire paths are used.'''
1673 1673
1674 1674 def complete(path, acceptable):
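# Complete dirstate entries under the given spec: with --full, whole
# matching paths are returned; otherwise completion stops at the next
# path separator past the spec, yielding directory names.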
1675 1675 dirstate = repo.dirstate
1676 1676 spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
1677 1677 rootdir = repo.root + pycompat.ossep
1678 1678 if spec != repo.root and not spec.startswith(rootdir):
1679 1679 return [], []
1680 1680 if os.path.isdir(spec):
1681 1681 spec += '/'
1682 1682 spec = spec[len(rootdir):]
1683 1683 fixpaths = pycompat.ossep != '/'
1684 1684 if fixpaths:
1685 1685 spec = spec.replace(pycompat.ossep, '/')
1686 1686 speclen = len(spec)
1687 1687 fullpaths = opts[r'full']
1688 1688 files, dirs = set(), set()
1689 1689 adddir, addfile = dirs.add, files.add
1690 1690 for f, st in dirstate.iteritems():
1691 1691 if f.startswith(spec) and st[0] in acceptable:
1692 1692 if fixpaths:
1693 1693 f = f.replace('/', pycompat.ossep)
1694 1694 if fullpaths:
1695 1695 addfile(f)
1696 1696 continue
1697 1697 s = f.find(pycompat.ossep, speclen)
1698 1698 if s >= 0:
1699 1699 adddir(f[:s])
1700 1700 else:
1701 1701 addfile(f)
1702 1702 return files, dirs
1703 1703
1704 1704 acceptable = ''
1705 1705 if opts[r'normal']:
1706 1706 acceptable += 'nm'
1707 1707 if opts[r'added']:
1708 1708 acceptable += 'a'
1709 1709 if opts[r'removed']:
1710 1710 acceptable += 'r'
1711 1711 cwd = repo.getcwd()
1712 1712 if not specs:
1713 1713 specs = ['.']
1714 1714
1715 1715 files, dirs = set(), set()
1716 1716 for spec in specs:
1717 1717 f, d = complete(spec, acceptable or 'nmar')
1718 1718 files.update(f)
1719 1719 dirs.update(d)
1720 1720 files.update(dirs)
1721 1721 ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
1722 1722 ui.write('\n')
1723 1723
1724 1724 @command('debugpeer', [], _('PATH'), norepo=True)
1725 1725 def debugpeer(ui, path):
1726 1726 """establish a connection to a peer repository"""
1727 1727 # Always enable peer request logging. Requires --debug to display
1728 1728 # though.
1729 1729 overrides = {
1730 1730 ('devel', 'debug.peer-request'): True,
1731 1731 }
1732 1732
1733 1733 with ui.configoverride(overrides):
1734 1734 peer = hg.peer(ui, {}, path)
1735 1735
1736 1736 local = peer.local() is not None
1737 1737 canpush = peer.canpush()
1738 1738
1739 1739 ui.write(_('url: %s\n') % peer.url())
1740 1740 ui.write(_('local: %s\n') % (_('yes') if local else _('no')))
1741 1741 ui.write(_('pushable: %s\n') % (_('yes') if canpush else _('no')))
1742 1742
1743 1743 @command('debugpickmergetool',
1744 1744 [('r', 'rev', '', _('check for files in this revision'), _('REV')),
1745 1745 ('', 'changedelete', None, _('emulate merging change and delete')),
1746 1746 ] + cmdutil.walkopts + cmdutil.mergetoolopts,
1747 1747 _('[PATTERN]...'),
1748 1748 inferrepo=True)
1749 1749 def debugpickmergetool(ui, repo, *pats, **opts):
1750 1750 """examine which merge tool is chosen for specified file
1751 1751
1752 1752 As described in :hg:`help merge-tools`, Mercurial examines the
1753 1753 configurations below in this order to decide which merge tool is
1754 1754 chosen for the specified file.
1755 1755
1756 1756 1. ``--tool`` option
1757 1757 2. ``HGMERGE`` environment variable
1758 1758 3. configurations in ``merge-patterns`` section
1759 1759 4. configuration of ``ui.merge``
1760 1760 5. configurations in ``merge-tools`` section
1761 1761 6. ``hgmerge`` tool (for historical reasons only)
1762 1762 7. default tool for fallback (``:merge`` or ``:prompt``)
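
For example (hypothetical configuration): with no ``--tool``, no
``HGMERGE``, and no matching ``merge-patterns`` entry, a configured
``ui.merge = :merge`` is selected at step 4 for every examined file.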
1763 1763
1764 1764 This command writes out the examination result in the style below::
1765 1765
1766 1766 FILE = MERGETOOL
1767 1767
1768 1768 By default, all files known in the first parent context of the
1769 1769 working directory are examined. Use file patterns and/or -I/-X
1770 1770 options to limit target files. -r/--rev is also useful to examine
1771 1771 files in another context without actually updating to it.
1772 1772
1773 1773 With --debug, this command also shows warning messages emitted
1774 1774 while matching against ``merge-patterns`` and so on. It is
1775 1775 recommended to use this option with explicit file patterns and/or
1776 1776 -I/-X options, because it increases the amount of output per file
1777 1777 according to the configurations in hgrc.
1778 1778
1779 1779 With -v/--verbose, this command first shows the configurations
1780 1780 below (only those actually specified).
1781 1781
1782 1782 - ``--tool`` option
1783 1783 - ``HGMERGE`` environment variable
1784 1784 - configuration of ``ui.merge``
1785 1785
1786 1786 If the merge tool is chosen before matching against
1787 1787 ``merge-patterns``, this command can't show any helpful
1788 1788 information, even with --debug. In such a case, the information
1789 1789 above is useful for understanding why a merge tool was chosen.
1790 1790 """
1791 1791 opts = pycompat.byteskwargs(opts)
1792 1792 overrides = {}
1793 1793 if opts['tool']:
1794 1794 overrides[('ui', 'forcemerge')] = opts['tool']
1795 1795 ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))
1796 1796
1797 1797 with ui.configoverride(overrides, 'debugmergepatterns'):
1798 1798 hgmerge = encoding.environ.get("HGMERGE")
1799 1799 if hgmerge is not None:
1800 1800 ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
1801 1801 uimerge = ui.config("ui", "merge")
1802 1802 if uimerge:
1803 1803 ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))
1804 1804
1805 1805 ctx = scmutil.revsingle(repo, opts.get('rev'))
1806 1806 m = scmutil.match(ctx, pats, opts)
1807 1807 changedelete = opts['changedelete']
1808 1808 for path in ctx.walk(m):
1809 1809 fctx = ctx[path]
1810 1810 try:
1811 1811 if not ui.debugflag:
1812 1812 ui.pushbuffer(error=True)
1813 1813 tool, toolpath = filemerge._picktool(repo, ui, path,
1814 1814 fctx.isbinary(),
1815 1815 'l' in fctx.flags(),
1816 1816 changedelete)
1817 1817 finally:
1818 1818 if not ui.debugflag:
1819 1819 ui.popbuffer()
1820 1820 ui.write(('%s = %s\n') % (path, tool))
1821 1821
1822 1822 @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
1823 1823 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
1824 1824 '''access the pushkey key/value protocol
1825 1825
1826 1826 With two args, list the keys in the given namespace.
1827 1827
1828 1828 With five args, set a key to new if it currently is set to old.
1829 1829 Reports success or failure.
1830 1830 '''
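# Illustrative invocations (paths and values are hypothetical):
#   hg debugpushkey /path/to/repo namespaces
#   hg debugpushkey /path/to/repo bookmarks
#   hg debugpushkey /path/to/repo bookmarks foo '' <40-hex-node>
# The last form sets bookmark "foo" to the node if it is currently unset.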
1831 1831
1832 1832 target = hg.peer(ui, {}, repopath)
1833 1833 if keyinfo:
1834 1834 key, old, new = keyinfo
1835 r = target.pushkey(namespace, key, old, new)
1835 with target.commandexecutor() as e:
1836 r = e.callcommand('pushkey', {
1837 'namespace': namespace,
1838 'key': key,
1839 'old': old,
1840 'new': new,
1841 }).result()
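# callcommand() returns a future; result() blocks until the server's
# response is available, whatever the underlying transport.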
1842
1836 1843 ui.status(pycompat.bytestr(r) + '\n')
1837 1844 return not r
1838 1845 else:
1839 1846 for k, v in sorted(target.listkeys(namespace).iteritems()):
1840 1847 ui.write("%s\t%s\n" % (stringutil.escapestr(k),
1841 1848 stringutil.escapestr(v)))
1842 1849
1843 1850 @command('debugpvec', [], _('A B'))
1844 1851 def debugpvec(ui, repo, a, b=None):
1845 1852 ca = scmutil.revsingle(repo, a)
1846 1853 cb = scmutil.revsingle(repo, b)
1847 1854 pa = pvec.ctxpvec(ca)
1848 1855 pb = pvec.ctxpvec(cb)
1849 1856 if pa == pb:
1850 1857 rel = "="
1851 1858 elif pa > pb:
1852 1859 rel = ">"
1853 1860 elif pa < pb:
1854 1861 rel = "<"
1855 1862 elif pa | pb:
1856 1863 rel = "|"
1857 1864 ui.write(_("a: %s\n") % pa)
1858 1865 ui.write(_("b: %s\n") % pb)
1859 1866 ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
1860 1867 ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
1861 1868 (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
1862 1869 pa.distance(pb), rel))
1863 1870
1864 1871 @command('debugrebuilddirstate|debugrebuildstate',
1865 1872 [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
1866 1873 ('', 'minimal', None, _('only rebuild files that are inconsistent with '
1867 1874 'the working copy parent')),
1868 1875 ],
1869 1876 _('[-r REV]'))
1870 1877 def debugrebuilddirstate(ui, repo, rev, **opts):
1871 1878 """rebuild the dirstate as it would look like for the given revision
1872 1879
1873 1880 If no revision is specified, the first parent of the working directory
will be used.
1874 1881
1875 1882 The dirstate will be set to the files of the given revision.
1876 1883 The actual working directory content or existing dirstate
1877 1884 information such as adds or removes is not considered.
1878 1885
1879 1886 ``minimal`` will only rebuild the dirstate status for files that claim to be
1880 1887 tracked but are not in the parent manifest, or that exist in the parent
1881 1888 manifest but are not in the dirstate. It will not change adds, removes, or
1882 1889 modified files that are in the working copy parent.
1883 1890
1884 1891 One use of this command is to make the next :hg:`status` invocation
1885 1892 check the actual file content.
1886 1893 """
1887 1894 ctx = scmutil.revsingle(repo, rev)
1888 1895 with repo.wlock():
1889 1896 dirstate = repo.dirstate
1890 1897 changedfiles = None
1891 1898 # See command doc for what minimal does.
1892 1899 if opts.get(r'minimal'):
1893 1900 manifestfiles = set(ctx.manifest().keys())
1894 1901 dirstatefiles = set(dirstate)
1895 1902 manifestonly = manifestfiles - dirstatefiles
1896 1903 dsonly = dirstatefiles - manifestfiles
1897 1904 dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
1898 1905 changedfiles = manifestonly | dsnotadded
1899 1906
1900 1907 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
1901 1908
1902 1909 @command('debugrebuildfncache', [], '')
1903 1910 def debugrebuildfncache(ui, repo):
1904 1911 """rebuild the fncache file"""
1905 1912 repair.rebuildfncache(ui, repo)
1906 1913
1907 1914 @command('debugrename',
1908 1915 [('r', 'rev', '', _('revision to debug'), _('REV'))],
1909 1916 _('[-r REV] FILE'))
1910 1917 def debugrename(ui, repo, file1, *pats, **opts):
1911 1918 """dump rename information"""
1912 1919
1913 1920 opts = pycompat.byteskwargs(opts)
1914 1921 ctx = scmutil.revsingle(repo, opts.get('rev'))
1915 1922 m = scmutil.match(ctx, (file1,) + pats, opts)
1916 1923 for abs in ctx.walk(m):
1917 1924 fctx = ctx[abs]
1918 1925 o = fctx.filelog().renamed(fctx.filenode())
1919 1926 rel = m.rel(abs)
1920 1927 if o:
1921 1928 ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
1922 1929 else:
1923 1930 ui.write(_("%s not renamed\n") % rel)
1924 1931
1925 1932 @command('debugrevlog', cmdutil.debugrevlogopts +
1926 1933 [('d', 'dump', False, _('dump index data'))],
1927 1934 _('-c|-m|FILE'),
1928 1935 optionalrepo=True)
1929 1936 def debugrevlog(ui, repo, file_=None, **opts):
1930 1937 """show data and statistics about a revlog"""
1931 1938 opts = pycompat.byteskwargs(opts)
1932 1939 r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
1933 1940
1934 1941 if opts.get("dump"):
1935 1942 numrevs = len(r)
1936 1943 ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
1937 1944 " rawsize totalsize compression heads chainlen\n"))
1938 1945 ts = 0
1939 1946 heads = set()
1940 1947
1941 1948 for rev in xrange(numrevs):
1942 1949 dbase = r.deltaparent(rev)
1943 1950 if dbase == -1:
1944 1951 dbase = rev
1945 1952 cbase = r.chainbase(rev)
1946 1953 clen = r.chainlen(rev)
1947 1954 p1, p2 = r.parentrevs(rev)
1948 1955 rs = r.rawsize(rev)
1949 1956 ts = ts + rs
1950 1957 heads -= set(r.parentrevs(rev))
1951 1958 heads.add(rev)
1952 1959 try:
1953 1960 compression = ts / r.end(rev)
1954 1961 except ZeroDivisionError:
1955 1962 compression = 0
1956 1963 ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
1957 1964 "%11d %5d %8d\n" %
1958 1965 (rev, p1, p2, r.start(rev), r.end(rev),
1959 1966 r.start(dbase), r.start(cbase),
1960 1967 r.start(p1), r.start(p2),
1961 1968 rs, ts, compression, len(heads), clen))
1962 1969 return 0
1963 1970
1964 1971 v = r.version
1965 1972 format = v & 0xFFFF
1966 1973 flags = []
1967 1974 gdelta = False
1968 1975 if v & revlog.FLAG_INLINE_DATA:
1969 1976 flags.append('inline')
1970 1977 if v & revlog.FLAG_GENERALDELTA:
1971 1978 gdelta = True
1972 1979 flags.append('generaldelta')
1973 1980 if not flags:
1974 1981 flags = ['(none)']
1975 1982
1976 1983 nummerges = 0
1977 1984 numfull = 0
1978 1985 numprev = 0
1979 1986 nump1 = 0
1980 1987 nump2 = 0
1981 1988 numother = 0
1982 1989 nump1prev = 0
1983 1990 nump2prev = 0
1984 1991 chainlengths = []
1985 1992 chainbases = []
1986 1993 chainspans = []
1987 1994
1988 1995 datasize = [None, 0, 0]
1989 1996 fullsize = [None, 0, 0]
1990 1997 deltasize = [None, 0, 0]
1991 1998 chunktypecounts = {}
1992 1999 chunktypesizes = {}
1993 2000
1994 2001 def addsize(size, l):
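# l is a [min, max, total] accumulator updated in place.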
1995 2002 if l[0] is None or size < l[0]:
1996 2003 l[0] = size
1997 2004 if size > l[1]:
1998 2005 l[1] = size
1999 2006 l[2] += size
2000 2007
2001 2008 numrevs = len(r)
2002 2009 for rev in xrange(numrevs):
2003 2010 p1, p2 = r.parentrevs(rev)
2004 2011 delta = r.deltaparent(rev)
2005 2012 if format > 0:
2006 2013 addsize(r.rawsize(rev), datasize)
2007 2014 if p2 != nullrev:
2008 2015 nummerges += 1
2009 2016 size = r.length(rev)
2010 2017 if delta == nullrev:
2011 2018 chainlengths.append(0)
2012 2019 chainbases.append(r.start(rev))
2013 2020 chainspans.append(size)
2014 2021 numfull += 1
2015 2022 addsize(size, fullsize)
2016 2023 else:
2017 2024 chainlengths.append(chainlengths[delta] + 1)
2018 2025 baseaddr = chainbases[delta]
2019 2026 revaddr = r.start(rev)
2020 2027 chainbases.append(baseaddr)
2021 2028 chainspans.append((revaddr - baseaddr) + size)
2022 2029 addsize(size, deltasize)
2023 2030 if delta == rev - 1:
2024 2031 numprev += 1
2025 2032 if delta == p1:
2026 2033 nump1prev += 1
2027 2034 elif delta == p2:
2028 2035 nump2prev += 1
2029 2036 elif delta == p1:
2030 2037 nump1 += 1
2031 2038 elif delta == p2:
2032 2039 nump2 += 1
2033 2040 elif delta != nullrev:
2034 2041 numother += 1
2035 2042
2036 2043 # Obtain data on the raw chunks in the revlog.
2037 2044 segment = r._getsegmentforrevs(rev, rev)[1]
2038 2045 if segment:
2039 2046 chunktype = bytes(segment[0:1])
2040 2047 else:
2041 2048 chunktype = 'empty'
2042 2049
2043 2050 if chunktype not in chunktypecounts:
2044 2051 chunktypecounts[chunktype] = 0
2045 2052 chunktypesizes[chunktype] = 0
2046 2053
2047 2054 chunktypecounts[chunktype] += 1
2048 2055 chunktypesizes[chunktype] += size
2049 2056
2050 2057 # Adjust size min value for empty cases
2051 2058 for size in (datasize, fullsize, deltasize):
2052 2059 if size[0] is None:
2053 2060 size[0] = 0
2054 2061
2055 2062 numdeltas = numrevs - numfull
2056 2063 numoprev = numprev - nump1prev - nump2prev
2057 2064 totalrawsize = datasize[2]
2058 2065 datasize[2] /= numrevs
2059 2066 fulltotal = fullsize[2]
2060 2067 fullsize[2] /= numfull
2061 2068 deltatotal = deltasize[2]
2062 2069 if numrevs - numfull > 0:
2063 2070 deltasize[2] /= numrevs - numfull
2064 2071 totalsize = fulltotal + deltatotal
2065 2072 avgchainlen = sum(chainlengths) / numrevs
2066 2073 maxchainlen = max(chainlengths)
2067 2074 maxchainspan = max(chainspans)
2068 2075 compratio = 1
2069 2076 if totalsize:
2070 2077 compratio = totalrawsize / totalsize
2071 2078
2072 2079 basedfmtstr = '%%%dd\n'
2073 2080 basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
2074 2081
2075 2082 def dfmtstr(max):
2076 2083 return basedfmtstr % len(str(max))
2077 2084 def pcfmtstr(max, padding=0):
2078 2085 return basepcfmtstr % (len(str(max)), ' ' * padding)
2079 2086
2080 2087 def pcfmt(value, total):
2081 2088 if total:
2082 2089 return (value, 100 * float(value) / total)
2083 2090 else:
2084 2091 return value, 100.0
2085 2092
2086 2093 ui.write(('format : %d\n') % format)
2087 2094 ui.write(('flags : %s\n') % ', '.join(flags))
2088 2095
2089 2096 ui.write('\n')
2090 2097 fmt = pcfmtstr(totalsize)
2091 2098 fmt2 = dfmtstr(totalsize)
2092 2099 ui.write(('revisions : ') + fmt2 % numrevs)
2093 2100 ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
2094 2101 ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
2095 2102 ui.write(('revisions : ') + fmt2 % numrevs)
2096 2103 ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
2097 2104 ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
2098 2105 ui.write(('revision size : ') + fmt2 % totalsize)
2099 2106 ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
2100 2107 ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))
2101 2108
2102 2109 def fmtchunktype(chunktype):
2103 2110 if chunktype == 'empty':
2104 2111 return ' %s : ' % chunktype
2105 2112 elif chunktype in pycompat.bytestr(string.ascii_letters):
2106 2113 return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
2107 2114 else:
2108 2115 return ' 0x%s : ' % hex(chunktype)
2109 2116
2110 2117 ui.write('\n')
2111 2118 ui.write(('chunks : ') + fmt2 % numrevs)
2112 2119 for chunktype in sorted(chunktypecounts):
2113 2120 ui.write(fmtchunktype(chunktype))
2114 2121 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
2115 2122 ui.write(('chunks size : ') + fmt2 % totalsize)
2116 2123 for chunktype in sorted(chunktypecounts):
2117 2124 ui.write(fmtchunktype(chunktype))
2118 2125 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
2119 2126
2120 2127 ui.write('\n')
2121 2128 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
2122 2129 ui.write(('avg chain length : ') + fmt % avgchainlen)
2123 2130 ui.write(('max chain length : ') + fmt % maxchainlen)
2124 2131 ui.write(('max chain reach : ') + fmt % maxchainspan)
2125 2132 ui.write(('compression ratio : ') + fmt % compratio)
2126 2133
2127 2134 if format > 0:
2128 2135 ui.write('\n')
2129 2136 ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
2130 2137 % tuple(datasize))
2131 2138 ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
2132 2139 % tuple(fullsize))
2133 2140 ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
2134 2141 % tuple(deltasize))
2135 2142
2136 2143 if numdeltas > 0:
2137 2144 ui.write('\n')
2138 2145 fmt = pcfmtstr(numdeltas)
2139 2146 fmt2 = pcfmtstr(numdeltas, 4)
2140 2147 ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
2141 2148 if numprev > 0:
2142 2149 ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
2143 2150 numprev))
2144 2151 ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
2145 2152 numprev))
2146 2153 ui.write((' other : ') + fmt2 % pcfmt(numoprev,
2147 2154 numprev))
2148 2155 if gdelta:
2149 2156 ui.write(('deltas against p1 : ')
2150 2157 + fmt % pcfmt(nump1, numdeltas))
2151 2158 ui.write(('deltas against p2 : ')
2152 2159 + fmt % pcfmt(nump2, numdeltas))
2153 2160 ui.write(('deltas against other : ') + fmt % pcfmt(numother,
2154 2161 numdeltas))
2155 2162
2156 2163 @command('debugrevspec',
2157 2164 [('', 'optimize', None,
2158 2165 _('print parsed tree after optimizing (DEPRECATED)')),
2159 2166 ('', 'show-revs', True, _('print list of result revisions (default)')),
2160 2167 ('s', 'show-set', None, _('print internal representation of result set')),
2161 2168 ('p', 'show-stage', [],
2162 2169 _('print parsed tree at the given stage'), _('NAME')),
2163 2170 ('', 'no-optimized', False, _('evaluate tree without optimization')),
2164 2171 ('', 'verify-optimized', False, _('verify optimized result')),
2165 2172 ],
2166 2173 ('REVSPEC'))
2167 2174 def debugrevspec(ui, repo, expr, **opts):
2168 2175 """parse and apply a revision specification
2169 2176
2170 2177 Use the -p/--show-stage option to print the parsed tree at the given
2171 2178 stages. Use -p all to print the tree at every stage.
2172 2179
2173 2180 Use the --no-show-revs option with -s or -p to print only the set
2174 2181 representation or the parsed tree, respectively.
2175 2182
2176 2183 Use --verify-optimized to compare the optimized result with the unoptimized
2177 2184 one. Returns 1 if the optimized result differs.
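
For example (illustrative)::

    hg debugrevspec -p all 'draft() and ancestors(.)'

prints the tree after every parsing stage, then the matching revisions.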
2178 2185 """
2179 2186 opts = pycompat.byteskwargs(opts)
2180 2187 aliases = ui.configitems('revsetalias')
2181 2188 stages = [
2182 2189 ('parsed', lambda tree: tree),
2183 2190 ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
2184 2191 ui.warn)),
2185 2192 ('concatenated', revsetlang.foldconcat),
2186 2193 ('analyzed', revsetlang.analyze),
2187 2194 ('optimized', revsetlang.optimize),
2188 2195 ]
2189 2196 if opts['no_optimized']:
2190 2197 stages = stages[:-1]
2191 2198 if opts['verify_optimized'] and opts['no_optimized']:
2192 2199 raise error.Abort(_('cannot use --verify-optimized with '
2193 2200 '--no-optimized'))
2194 2201 stagenames = set(n for n, f in stages)
2195 2202
2196 2203 showalways = set()
2197 2204 showchanged = set()
2198 2205 if ui.verbose and not opts['show_stage']:
2199 2206 # show parsed tree by --verbose (deprecated)
2200 2207 showalways.add('parsed')
2201 2208 showchanged.update(['expanded', 'concatenated'])
2202 2209 if opts['optimize']:
2203 2210 showalways.add('optimized')
2204 2211 if opts['show_stage'] and opts['optimize']:
2205 2212 raise error.Abort(_('cannot use --optimize with --show-stage'))
2206 2213 if opts['show_stage'] == ['all']:
2207 2214 showalways.update(stagenames)
2208 2215 else:
2209 2216 for n in opts['show_stage']:
2210 2217 if n not in stagenames:
2211 2218 raise error.Abort(_('invalid stage name: %s') % n)
2212 2219 showalways.update(opts['show_stage'])
2213 2220
2214 2221 treebystage = {}
2215 2222 printedtree = None
2216 2223 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
2217 2224 for n, f in stages:
2218 2225 treebystage[n] = tree = f(tree)
2219 2226 if n in showalways or (n in showchanged and tree != printedtree):
2220 2227 if opts['show_stage'] or n != 'parsed':
2221 2228 ui.write(("* %s:\n") % n)
2222 2229 ui.write(revsetlang.prettyformat(tree), "\n")
2223 2230 printedtree = tree
2224 2231
2225 2232 if opts['verify_optimized']:
2226 2233 arevs = revset.makematcher(treebystage['analyzed'])(repo)
2227 2234 brevs = revset.makematcher(treebystage['optimized'])(repo)
2228 2235 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2229 2236 ui.write(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
2230 2237 ui.write(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
2231 2238 arevs = list(arevs)
2232 2239 brevs = list(brevs)
2233 2240 if arevs == brevs:
2234 2241 return 0
2235 2242 ui.write(('--- analyzed\n'), label='diff.file_a')
2236 2243 ui.write(('+++ optimized\n'), label='diff.file_b')
2237 2244 sm = difflib.SequenceMatcher(None, arevs, brevs)
2238 2245 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
2239 2246 if tag in ('delete', 'replace'):
2240 2247 for c in arevs[alo:ahi]:
2241 2248 ui.write('-%s\n' % c, label='diff.deleted')
2242 2249 if tag in ('insert', 'replace'):
2243 2250 for c in brevs[blo:bhi]:
2244 2251 ui.write('+%s\n' % c, label='diff.inserted')
2245 2252 if tag == 'equal':
2246 2253 for c in arevs[alo:ahi]:
2247 2254 ui.write(' %s\n' % c)
2248 2255 return 1
2249 2256
2250 2257 func = revset.makematcher(tree)
2251 2258 revs = func(repo)
2252 2259 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2253 2260 ui.write(("* set:\n"), smartset.prettyformat(revs), "\n")
2254 2261 if not opts['show_revs']:
2255 2262 return
2256 2263 for c in revs:
2257 2264 ui.write("%d\n" % c)
2258 2265
2259 2266 @command('debugserve', [
2260 2267 ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
2261 2268 ('', 'logiofd', '', _('file descriptor to log server I/O to')),
2262 2269 ('', 'logiofile', '', _('file to log server I/O to')),
2263 2270 ], '')
2264 2271 def debugserve(ui, repo, **opts):
2265 2272 """run a server with advanced settings
2266 2273
2267 2274 This command is similar to :hg:`serve`. It exists partially as a
2268 2275 workaround to the fact that ``hg serve --stdio`` must have specific
2269 2276 arguments for security reasons.
2270 2277 """
2271 2278 opts = pycompat.byteskwargs(opts)
2272 2279
2273 2280 if not opts['sshstdio']:
2274 2281 raise error.Abort(_('only --sshstdio is currently supported'))
2275 2282
2276 2283 logfh = None
2277 2284
2278 2285 if opts['logiofd'] and opts['logiofile']:
2279 2286 raise error.Abort(_('cannot use both --logiofd and --logiofile'))
2280 2287
2281 2288 if opts['logiofd']:
2282 2289 # Line buffered because output is line based.
2283 2290 logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
2284 2291 elif opts['logiofile']:
2285 2292 logfh = open(opts['logiofile'], 'ab', 1)
2286 2293
2287 2294 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
2288 2295 s.serve_forever()
2289 2296
2290 2297 @command('debugsetparents', [], _('REV1 [REV2]'))
2291 2298 def debugsetparents(ui, repo, rev1, rev2=None):
2292 2299 """manually set the parents of the current working directory
2293 2300
2294 2301 This is useful for writing repository conversion tools, but should
2295 2302 be used with care. For example, neither the working directory nor the
2296 2303 dirstate is updated, so file status may be incorrect after running this
2297 2304 command.
2298 2305
2299 2306 Returns 0 on success.
2300 2307 """
2301 2308
2302 2309 node1 = scmutil.revsingle(repo, rev1).node()
2303 2310 node2 = scmutil.revsingle(repo, rev2, 'null').node()
2304 2311
2305 2312 with repo.wlock():
2306 2313 repo.setparents(node1, node2)
2307 2314
2308 2315 @command('debugssl', [], '[SOURCE]', optionalrepo=True)
2309 2316 def debugssl(ui, repo, source=None, **opts):
2310 2317 '''test a secure connection to a server
2311 2318
2312 2319 This builds the certificate chain for the server on Windows, installing the
2313 2320 missing intermediates and trusted root via Windows Update if necessary. It
2314 2321 does nothing on other platforms.
2315 2322
2316 2323 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
2317 2324 that server is used. See :hg:`help urls` for more information.
2318 2325
2319 2326 If the update succeeds, retry the original operation. Otherwise, the cause
2320 2327 of the SSL error is likely another issue.
2321 2328 '''
2322 2329 if not pycompat.iswindows:
2323 2330 raise error.Abort(_('certificate chain building is only possible on '
2324 2331 'Windows'))
2325 2332
2326 2333 if not source:
2327 2334 if not repo:
2328 2335 raise error.Abort(_("there is no Mercurial repository here, and no "
2329 2336 "server specified"))
2330 2337 source = "default"
2331 2338
2332 2339 source, branches = hg.parseurl(ui.expandpath(source))
2333 2340 url = util.url(source)
2334 2341 addr = None
2335 2342
2336 2343 defaultport = {'https': 443, 'ssh': 22}
2337 2344 if url.scheme in defaultport:
2338 2345 try:
2339 2346 addr = (url.host, int(url.port or defaultport[url.scheme]))
2340 2347 except ValueError:
2341 2348 raise error.Abort(_("malformed port number in URL"))
2342 2349 else:
2343 2350 raise error.Abort(_("only https and ssh connections are supported"))
2344 2351
2345 2352 from . import win32
2346 2353
2347 2354 s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
2348 2355 cert_reqs=ssl.CERT_NONE, ca_certs=None)
2349 2356
2350 2357 try:
2351 2358 s.connect(addr)
2352 2359 cert = s.getpeercert(True)
2353 2360
2354 2361 ui.status(_('checking the certificate chain for %s\n') % url.host)
2355 2362
2356 2363 complete = win32.checkcertificatechain(cert, build=False)
2357 2364
2358 2365 if not complete:
2359 2366 ui.status(_('certificate chain is incomplete, updating... '))
2360 2367
2361 2368 if not win32.checkcertificatechain(cert):
2362 2369 ui.status(_('failed.\n'))
2363 2370 else:
2364 2371 ui.status(_('done.\n'))
2365 2372 else:
2366 2373 ui.status(_('full certificate chain is available\n'))
2367 2374 finally:
2368 2375 s.close()
2369 2376
2370 2377 @command('debugsub',
2371 2378 [('r', 'rev', '',
2372 2379 _('revision to check'), _('REV'))],
2373 2380 _('[-r REV] [REV]'))
2374 2381 def debugsub(ui, repo, rev=None):
2375 2382 ctx = scmutil.revsingle(repo, rev, None)
2376 2383 for k, v in sorted(ctx.substate.items()):
2377 2384 ui.write(('path %s\n') % k)
2378 2385 ui.write((' source %s\n') % v[0])
2379 2386 ui.write((' revision %s\n') % v[1])
2380 2387
2381 2388 @command('debugsuccessorssets',
2382 2389 [('', 'closest', False, _('return closest successors sets only'))],
2383 2390 _('[REV]'))
2384 2391 def debugsuccessorssets(ui, repo, *revs, **opts):
2385 2392 """show set of successors for revision
2386 2393
2387 2394 A successors set of changeset A is a consistent group of revisions that
2388 2395 succeed A. It contains only non-obsolete changesets unless the
2389 2396 ``--closest`` option is given.
2390 2397
2391 2398 In most cases a changeset A has a single successors set containing a single
2392 2399 successor (changeset A replaced by A').
2393 2400
2394 2401 A changeset that is made obsolete with no successors is called "pruned".
2395 2402 Such changesets have no successors sets at all.
2396 2403
2397 2404 A changeset that has been "split" will have a successors set containing
2398 2405 more than one successor.
2399 2406
2400 2407 A changeset that has been rewritten in multiple different ways is called
2401 2408 "divergent". Such changesets have multiple successor sets (each of which
2402 2409 may also be split, i.e. have multiple successors).
2403 2410
2404 2411 Results are displayed as follows::
2405 2412
2406 2413 <rev1>
2407 2414 <successors-1A>
2408 2415 <rev2>
2409 2416 <successors-2A>
2410 2417 <successors-2B1> <successors-2B2> <successors-2B3>
2411 2418
2412 2419 Here rev2 has two possible (i.e. divergent) successors sets. The first
2413 2420 holds one element, whereas the second holds three (i.e. the changeset has
2414 2421 been split).
2415 2422 """
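# Illustrative output for a split changeset (short hashes below are
# hypothetical): the revision on one line, each successors set indented
# beneath it:
#   c191a9eb0b10
#    24c6c60f9dc0 0ef9ff75f8e2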
2416 2423 # passed to successorssets caching computation from one call to another
2417 2424 cache = {}
2418 2425 ctx2str = bytes
2419 2426 node2str = short
2420 2427 for rev in scmutil.revrange(repo, revs):
2421 2428 ctx = repo[rev]
2422 2429 ui.write('%s\n'% ctx2str(ctx))
2423 2430 for succsset in obsutil.successorssets(repo, ctx.node(),
2424 2431 closest=opts[r'closest'],
2425 2432 cache=cache):
2426 2433 if succsset:
2427 2434 ui.write(' ')
2428 2435 ui.write(node2str(succsset[0]))
2429 2436 for node in succsset[1:]:
2430 2437 ui.write(' ')
2431 2438 ui.write(node2str(node))
2432 2439 ui.write('\n')
2433 2440
2434 2441 @command('debugtemplate',
2435 2442 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2436 2443 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2437 2444 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2438 2445 optionalrepo=True)
2439 2446 def debugtemplate(ui, repo, tmpl, **opts):
2440 2447 """parse and apply a template
2441 2448
2442 2449 If -r/--rev is given, the template is processed as a log template and
2443 2450 applied to the given changesets. Otherwise, it is processed as a generic
2444 2451 template.
2445 2452
2446 2453 Use --verbose to print the parsed tree.
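
For example (illustrative)::

    hg debugtemplate -r . '{node|short}: {desc|firstline}\n'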
2447 2454 """
2448 2455 revs = None
2449 2456 if opts[r'rev']:
2450 2457 if repo is None:
2451 2458 raise error.RepoError(_('there is no Mercurial repository here '
2452 2459 '(.hg not found)'))
2453 2460 revs = scmutil.revrange(repo, opts[r'rev'])
2454 2461
2455 2462 props = {}
2456 2463 for d in opts[r'define']:
2457 2464 try:
2458 2465 k, v = (e.strip() for e in d.split('=', 1))
2459 2466 if not k or k == 'ui':
2460 2467 raise ValueError
2461 2468 props[k] = v
2462 2469 except ValueError:
2463 2470 raise error.Abort(_('malformed keyword definition: %s') % d)
2464 2471
2465 2472 if ui.verbose:
2466 2473 aliases = ui.configitems('templatealias')
2467 2474 tree = templater.parse(tmpl)
2468 2475 ui.note(templater.prettyformat(tree), '\n')
2469 2476 newtree = templater.expandaliases(tree, aliases)
2470 2477 if newtree != tree:
2471 2478 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2472 2479
2473 2480 if revs is None:
2474 2481 tres = formatter.templateresources(ui, repo)
2475 2482 t = formatter.maketemplater(ui, tmpl, resources=tres)
2476 2483 ui.write(t.renderdefault(props))
2477 2484 else:
2478 2485 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
2479 2486 for r in revs:
2480 2487 displayer.show(repo[r], **pycompat.strkwargs(props))
2481 2488 displayer.close()
2482 2489
2483 2490 @command('debuguigetpass', [
2484 2491 ('p', 'prompt', '', _('prompt text'), _('TEXT')),
2485 2492 ], _('[-p TEXT]'), norepo=True)
2486 2493 def debuguigetpass(ui, prompt=''):
2487 2494 """show prompt to type password"""
2488 2495 r = ui.getpass(prompt)
2489 2496 ui.write(('response: %s\n') % r)
2490 2497
2491 2498 @command('debuguiprompt', [
2492 2499 ('p', 'prompt', '', _('prompt text'), _('TEXT')),
2493 2500 ], _('[-p TEXT]'), norepo=True)
2494 2501 def debuguiprompt(ui, prompt=''):
2495 2502 """show plain prompt"""
2496 2503 r = ui.prompt(prompt)
2497 2504 ui.write(('response: %s\n') % r)
2498 2505
2499 2506 @command('debugupdatecaches', [])
2500 2507 def debugupdatecaches(ui, repo, *pats, **opts):
2501 2508 """warm all known caches in the repository"""
2502 2509 with repo.wlock(), repo.lock():
2503 2510 repo.updatecaches(full=True)
2504 2511
2505 2512 @command('debugupgraderepo', [
2506 2513 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2507 2514 ('', 'run', False, _('performs an upgrade')),
2508 2515 ])
2509 2516 def debugupgraderepo(ui, repo, run=False, optimize=None):
2510 2517 """upgrade a repository to use different features
2511 2518
2512 2519 If no arguments are specified, the repository is evaluated for upgrade
2513 2520 and a list of problems and potential optimizations is printed.
2514 2521
2515 2522 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2516 2523 can be influenced via additional arguments. More details will be provided
2517 2524 by the command output when run without ``--run``.
2518 2525
2519 2526 During the upgrade, the repository will be locked and no writes will be
2520 2527 allowed.
2521 2528
2522 2529 At the end of the upgrade, the repository may not be readable while new
2523 2530 repository data is swapped in. This window will be as long as it takes to
2524 2531 rename some directories inside the ``.hg`` directory. On most machines, this
2525 2532 should complete almost instantaneously and the chances of a consumer being
2526 2533 unable to access the repository should be low.
2527 2534 """
2528 2535 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2529 2536
2530 2537 @command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
2531 2538 inferrepo=True)
2532 2539 def debugwalk(ui, repo, *pats, **opts):
2533 2540 """show how files match on given patterns"""
2534 2541 opts = pycompat.byteskwargs(opts)
2535 2542 m = scmutil.match(repo[None], pats, opts)
2536 2543 ui.write(('matcher: %r\n' % m))
2537 2544 items = list(repo[None].walk(m))
2538 2545 if not items:
2539 2546 return
2540 2547 f = lambda fn: fn
2541 2548 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2542 2549 f = lambda fn: util.normpath(fn)
2543 2550 fmt = 'f %%-%ds %%-%ds %%s' % (
2544 2551 max([len(abs) for abs in items]),
2545 2552 max([len(m.rel(abs)) for abs in items]))
2546 2553 for abs in items:
2547 2554 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2548 2555 ui.write("%s\n" % line.rstrip())
2549 2556
2550 2557 @command('debugwhyunstable', [], _('REV'))
2551 2558 def debugwhyunstable(ui, repo, rev):
2552 2559 """explain instabilities of a changeset"""
2553 2560 for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
2554 2561 dnodes = ''
2555 2562 if entry.get('divergentnodes'):
2556 2563 dnodes = ' '.join('%s (%s)' % (ctx.hex(), ctx.phasestr())
2557 2564 for ctx in entry['divergentnodes']) + ' '
2558 2565 ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
2559 2566 entry['reason'], entry['node']))
2560 2567
2561 2568 @command('debugwireargs',
2562 2569 [('', 'three', '', 'three'),
2563 2570 ('', 'four', '', 'four'),
2564 2571 ('', 'five', '', 'five'),
2565 2572 ] + cmdutil.remoteopts,
2566 2573 _('REPO [OPTIONS]... [ONE [TWO]]'),
2567 2574 norepo=True)
2568 2575 def debugwireargs(ui, repopath, *vals, **opts):
2569 2576 opts = pycompat.byteskwargs(opts)
2570 2577 repo = hg.peer(ui, opts, repopath)
2571 2578 for opt in cmdutil.remoteopts:
2572 2579 del opts[opt[1]]
2573 2580 args = {}
2574 2581 for k, v in opts.iteritems():
2575 2582 if v:
2576 2583 args[k] = v
2577 2584 args = pycompat.strkwargs(args)
2578 2585 # run twice to check that we don't mess up the stream for the next command
2579 2586 res1 = repo.debugwireargs(*vals, **args)
2580 2587 res2 = repo.debugwireargs(*vals, **args)
2581 2588 ui.write("%s\n" % res1)
2582 2589 if res1 != res2:
2583 2590 ui.warn("%s\n" % res2)
2584 2591
2585 2592 def _parsewirelangblocks(fh):
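# Yield (action, blocklines) pairs from the wire-language input: an action
# is an unindented line; its block is the run of indented lines following
# it. Blank lines and lines starting with '#' are skipped.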
2586 2593 activeaction = None
2587 2594 blocklines = []
2588 2595
2589 2596 for line in fh:
2590 2597 line = line.rstrip()
2591 2598 if not line:
2592 2599 continue
2593 2600
2594 2601 if line.startswith(b'#'):
2595 2602 continue
2596 2603
2597 2604 if not line.startswith(' '):
2598 2605 # New block. Flush previous one.
2599 2606 if activeaction:
2600 2607 yield activeaction, blocklines
2601 2608
2602 2609 activeaction = line
2603 2610 blocklines = []
2604 2611 continue
2605 2612
2606 2613 # Else we start with an indent.
2607 2614
2608 2615 if not activeaction:
2609 2616 raise error.Abort(_('indented line outside of block'))
2610 2617
2611 2618 blocklines.append(line)
2612 2619
2613 2620 # Flush last block.
2614 2621 if activeaction:
2615 2622 yield activeaction, blocklines
2616 2623
2617 2624 @command('debugwireproto',
2618 2625 [
2619 2626 ('', 'localssh', False, _('start an SSH server for this repo')),
2620 2627 ('', 'peer', '', _('construct a specific version of the peer')),
2621 2628 ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
2622 2629 ] + cmdutil.remoteopts,
2623 2630 _('[PATH]'),
2624 2631 optionalrepo=True)
2625 2632 def debugwireproto(ui, repo, path=None, **opts):
2626 2633 """send wire protocol commands to a server
2627 2634
2628 2635 This command can be used to issue wire protocol commands to remote
2629 2636 peers and to debug the raw data being exchanged.
2630 2637
2631 2638 ``--localssh`` will start an SSH server against the current repository
2632 2639 and connect to that. By default, the connection will perform a handshake
2633 2640 and establish an appropriate peer instance.
2634 2641
2635 2642 ``--peer`` can be used to bypass the handshake protocol and construct a
2636 2643 peer instance using the specified class type. Valid values are ``raw``,
2637 2644 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
2638 2645 raw data payloads and don't support higher-level command actions.
2639 2646
2640 2647 ``--noreadstderr`` can be used to disable automatic reading from stderr
2641 2648 of the peer (for SSH connections only). Disabling automatic reading of
2642 2649 stderr is useful for making output more deterministic.
2643 2650
2644 2651 Commands are issued via a mini language which is specified via stdin.
2645 2652 The language consists of individual actions to perform. An action is
2646 2653 defined by a block. A block is defined as a line with no leading
2647 2654 space followed by 0 or more lines with leading space. Blocks are
2648 2655 effectively a high-level command with additional metadata.
2649 2656
2650 2657 Lines beginning with ``#`` are ignored.
2651 2658
2652 2659 The following sections denote available actions.
2653 2660
2654 2661 raw
2655 2662 ---
2656 2663
2657 2664 Send raw data to the server.
2658 2665
2659 2666 The block payload contains the raw data to send as one atomic send
2660 2667 operation. The data may not actually be delivered in a single system
2661 2668 call: it depends on the abilities of the transport being used.
2662 2669
2663 2670 Each line in the block is de-indented and concatenated. Then, that
2664 2671 value is evaluated as a Python b'' literal. This allows the use of
2665 2672 backslash escaping, etc.
2666 2673
2667 2674 raw+
2668 2675 ----
2669 2676
2670 2677 Behaves like ``raw`` except flushes output afterwards.
2671 2678
2672 2679 command <X>
2673 2680 -----------
2674 2681
2675 2682 Send a request to run a named command, whose name follows the ``command``
2676 2683 string.
2677 2684
2678 2685 Arguments to the command are defined as lines in this block. The format of
2679 2686 each line is ``<key> <value>``. e.g.::
2680 2687
2681 2688 command listkeys
2682 2689 namespace bookmarks
2683 2690
2684 2691 If the value begins with ``eval:``, it will be interpreted as a Python
2685 2692 literal expression. Otherwise values are interpreted as Python b'' literals.
2686 2693 This allows sending complex types and encoding special byte sequences via
2687 2694 backslash escaping.
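
For instance (illustrative), ``key eval:[b'one', b'two']`` sends a
Python list as the value of argument ``key``.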
2688 2695
2689 2696 The following arguments have special meaning:
2690 2697
2691 2698 ``PUSHFILE``
2692 2699 When defined, the *push* mechanism of the peer will be used instead
2693 2700 of the static request-response mechanism and the content of the
2694 2701 file specified in the value of this argument will be sent as the
2695 2702 command payload.
2696 2703
2697 2704 This can be used to submit a local bundle file to the remote.
2698 2705
2699 2706 batchbegin
2700 2707 ----------
2701 2708
2702 2709 Instruct the peer to begin a batched send.
2703 2710
2704 2711 All ``command`` blocks are queued for execution until the next
2705 2712 ``batchsubmit`` block.
2706 2713
2707 2714 batchsubmit
2708 2715 -----------
2709 2716
2710 2717 Submit previously queued ``command`` blocks as a batch request.
2711 2718
2712 2719 This action MUST be paired with a ``batchbegin`` action.
2713 2720
2714 2721 httprequest <method> <path>
2715 2722 ---------------------------
2716 2723
2717 2724 (HTTP peer only)
2718 2725
2719 2726 Send an HTTP request to the peer.
2720 2727
2721 2728 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
2722 2729
2723 2730 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
2724 2731 headers to add to the request. e.g. ``Accept: foo``.
2725 2732
2726 2733 The following arguments are special:
2727 2734
2728 2735 ``BODYFILE``
2729 2736 The content of the file defined as the value to this argument will be
2730 2737 transferred verbatim as the HTTP request body.
2731 2738
2732 2739 ``frame <type> <flags> <payload>``
2733 2740 Send a unified protocol frame as part of the request body.
2734 2741
2735 2742 All frames will be collected and sent as the body to the HTTP
2736 2743 request.
2737 2744
2738 2745 close
2739 2746 -----
2740 2747
2741 2748 Close the connection to the server.
2742 2749
2743 2750 flush
2744 2751 -----
2745 2752
2746 2753 Flush data written to the server.
2747 2754
2748 2755 readavailable
2749 2756 -------------
2750 2757
2751 2758 Close the write end of the connection and read all available data from
2752 2759 the server.
2753 2760
2754 2761 If the connection to the server encompasses multiple pipes, we poll both
2755 2762 pipes and read available data.
2756 2763
2757 2764 readline
2758 2765 --------
2759 2766
2760 2767 Read a line of output from the server. If there are multiple output
2761 2768 pipes, reads only the main pipe.
2762 2769
2763 2770 ereadline
2764 2771 ---------
2765 2772
2766 2773 Like ``readline``, but read from the stderr pipe, if available.
2767 2774
2768 2775 read <X>
2769 2776 --------
2770 2777
2771 2778 ``read()`` ``<X>`` bytes from the server's main output pipe.
2772 2779
2773 2780 eread <X>
2774 2781 ---------
2775 2782
2776 2783 ``read()`` ``<X>`` bytes from the server's stderr pipe, if available.
2777 2784
2778 2785 Specifying Unified Frame-Based Protocol Frames
2779 2786 ----------------------------------------------
2780 2787
2781 2788 It is possible to emit *Unified Frame-Based Protocol* frames by using
2782 2789 special syntax.
2783 2790
2784 2791 A frame is composed of a type, flags, and a payload. These can be parsed
2785 2792 from a string of the form::
2786 2793
2787 2794 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
2788 2795
2789 2796 ``request-id`` and ``stream-id`` are integers defining the request and
2790 2797 stream identifiers.
2791 2798
2792 2799 ``type`` can be an integer value for the frame type or the string name
2793 2800 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
2794 2801 ``command-name``.
2795 2802
2796 2803 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
2797 2804 components. Each component (and there can be just one) can be an integer
2798 2805 or a flag name for stream flags or frame flags, respectively. Values are
2799 2806 resolved to integers and then bitwise OR'd together.
2800 2807
2801 2808 ``payload`` represents the raw frame payload. If it begins with
2802 2809 ``cbor:``, the following string is evaluated as Python code and the
2803 2810 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
2804 2811 as a Python byte string literal.
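
For example, a hypothetical frame specification (the type and stream-flag
names are assumed from ``wireprotoframing.py``; treat the line as
illustrative only)::

    1 1 stream-begin command-name 0 heads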
2805 2812 """
2806 2813 opts = pycompat.byteskwargs(opts)
2807 2814
2808 2815 if opts['localssh'] and not repo:
2809 2816 raise error.Abort(_('--localssh requires a repository'))
2810 2817
2811 2818 if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
2812 2819 raise error.Abort(_('invalid value for --peer'),
2813 2820 hint=_('valid values are "raw", "http2", "ssh1", and "ssh2"'))
2814 2821
2815 2822 if path and opts['localssh']:
2816 2823 raise error.Abort(_('cannot specify --localssh with an explicit '
2817 2824 'path'))
2818 2825
2819 2826 if ui.interactive():
2820 2827 ui.write(_('(waiting for commands on stdin)\n'))
2821 2828
2822 2829 blocks = list(_parsewirelangblocks(ui.fin))
2823 2830
2824 2831 proc = None
2825 2832 stdin = None
2826 2833 stdout = None
2827 2834 stderr = None
2828 2835 opener = None
2829 2836
2830 2837 if opts['localssh']:
2831 2838 # We start the SSH server in its own process so there is process
2832 2839 # separation. This prevents a whole class of potential bugs around
2833 2840 # shared state from interfering with server operation.
2834 2841 args = procutil.hgcmd() + [
2835 2842 '-R', repo.root,
2836 2843 'debugserve', '--sshstdio',
2837 2844 ]
2838 2845 proc = subprocess.Popen(args, stdin=subprocess.PIPE,
2839 2846 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
2840 2847 bufsize=0)
2841 2848
2842 2849 stdin = proc.stdin
2843 2850 stdout = proc.stdout
2844 2851 stderr = proc.stderr
2845 2852
2846 2853 # We turn the pipes into observers so we can log I/O.
2847 2854 if ui.verbose or opts['peer'] == 'raw':
2848 2855 stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
2849 2856 logdata=True)
2850 2857 stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
2851 2858 logdata=True)
2852 2859 stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
2853 2860 logdata=True)
2854 2861
2855 2862 # --localssh also implies the peer connection settings.
2856 2863
2857 2864 url = 'ssh://localserver'
2858 2865 autoreadstderr = not opts['noreadstderr']
2859 2866
2860 2867 if opts['peer'] == 'ssh1':
2861 2868 ui.write(_('creating ssh peer for wire protocol version 1\n'))
2862 2869 peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
2863 2870 None, autoreadstderr=autoreadstderr)
2864 2871 elif opts['peer'] == 'ssh2':
2865 2872 ui.write(_('creating ssh peer for wire protocol version 2\n'))
2866 2873 peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
2867 2874 None, autoreadstderr=autoreadstderr)
2868 2875 elif opts['peer'] == 'raw':
2869 2876 ui.write(_('using raw connection to peer\n'))
2870 2877 peer = None
2871 2878 else:
2872 2879 ui.write(_('creating ssh peer from handshake results\n'))
2873 2880 peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
2874 2881 autoreadstderr=autoreadstderr)
2875 2882
2876 2883 elif path:
2877 2884 # We bypass hg.peer() so we can proxy the sockets.
2878 2885 # TODO consider not doing this because we skip
2879 2886 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
2880 2887 u = util.url(path)
2881 2888 if u.scheme != 'http':
2882 2889 raise error.Abort(_('only http:// paths are currently supported'))
2883 2890
2884 2891 url, authinfo = u.authinfo()
2885 2892 openerargs = {
2886 2893 r'useragent': b'Mercurial debugwireproto',
2887 2894 }
2888 2895
2889 2896 # Turn pipes/sockets into observers so we can log I/O.
2890 2897 if ui.verbose:
2891 2898 openerargs.update({
2892 2899 r'loggingfh': ui,
2893 2900 r'loggingname': b's',
2894 2901 r'loggingopts': {
2895 2902 r'logdata': True,
2896 2903 r'logdataapis': False,
2897 2904 },
2898 2905 })
2899 2906
2900 2907 if ui.debugflag:
2901 2908 openerargs[r'loggingopts'][r'logdataapis'] = True
2902 2909
2903 2910 # Don't send default headers when in raw mode. This allows us to
2904 2911 # bypass most of the behavior of our URL handling code so we can
2905 2912 # have near complete control over what's sent on the wire.
2906 2913 if opts['peer'] == 'raw':
2907 2914 openerargs[r'sendaccept'] = False
2908 2915
2909 2916 opener = urlmod.opener(ui, authinfo, **openerargs)
2910 2917
2911 2918 if opts['peer'] == 'http2':
2912 2919 ui.write(_('creating http peer for wire protocol version 2\n'))
2913 2920 # We go through makepeer() because we need an API descriptor for
2914 2921 # the peer instance to be useful.
2915 2922 with ui.configoverride({
2916 2923 ('experimental', 'httppeer.advertise-v2'): True}):
2917 2924 peer = httppeer.makepeer(ui, path, opener=opener)
2918 2925
2919 2926 if not isinstance(peer, httppeer.httpv2peer):
2920 2927 raise error.Abort(_('could not instantiate HTTP peer for '
2921 2928 'wire protocol version 2'),
2922 2929 hint=_('the server may not have the feature '
2923 2930 'enabled or is not allowing this '
2924 2931 'client version'))
2925 2932
2926 2933 elif opts['peer'] == 'raw':
2927 2934 ui.write(_('using raw connection to peer\n'))
2928 2935 peer = None
2929 2936 elif opts['peer']:
2930 2937 raise error.Abort(_('--peer %s not supported with HTTP peers') %
2931 2938 opts['peer'])
2932 2939 else:
2933 2940 peer = httppeer.makepeer(ui, path, opener=opener)
2934 2941
2935 2942 # We /could/ populate stdin/stdout with sock.makefile()...
2936 2943 else:
2937 2944 raise error.Abort(_('unsupported connection configuration'))
2938 2945
2939 2946 batchedcommands = None
2940 2947
2941 2948 # Now perform actions based on the parsed wire language instructions.
2942 2949 for action, lines in blocks:
2943 2950 if action in ('raw', 'raw+'):
2944 2951 if not stdin:
2945 2952 raise error.Abort(_('cannot call raw/raw+ on this peer'))
2946 2953
2947 2954 # Concatenate the data together.
2948 2955 data = ''.join(l.lstrip() for l in lines)
2949 2956 data = stringutil.unescapestr(data)
2950 2957 stdin.write(data)
2951 2958
2952 2959 if action == 'raw+':
2953 2960 stdin.flush()
2954 2961 elif action == 'flush':
2955 2962 if not stdin:
2956 2963 raise error.Abort(_('cannot call flush on this peer'))
2957 2964 stdin.flush()
2958 2965 elif action.startswith('command'):
2959 2966 if not peer:
2960 2967 raise error.Abort(_('cannot send commands unless peer instance '
2961 2968 'is available'))
2962 2969
2963 2970 command = action.split(' ', 1)[1]
2964 2971
2965 2972 args = {}
2966 2973 for line in lines:
2967 2974 # We need to allow empty values.
2968 2975 fields = line.lstrip().split(' ', 1)
2969 2976 if len(fields) == 1:
2970 2977 key = fields[0]
2971 2978 value = ''
2972 2979 else:
2973 2980 key, value = fields
2974 2981
2975 2982 if value.startswith('eval:'):
2976 2983 value = stringutil.evalpythonliteral(value[5:])
2977 2984 else:
2978 2985 value = stringutil.unescapestr(value)
2979 2986
2980 2987 args[key] = value
2981 2988
2982 2989 if batchedcommands is not None:
2983 2990 batchedcommands.append((command, args))
2984 2991 continue
2985 2992
2986 2993 ui.status(_('sending %s command\n') % command)
2987 2994
2988 2995 if 'PUSHFILE' in args:
2989 2996 with open(args['PUSHFILE'], r'rb') as fh:
2990 2997 del args['PUSHFILE']
2991 2998 res, output = peer._callpush(command, fh,
2992 2999 **pycompat.strkwargs(args))
2993 3000 ui.status(_('result: %s\n') % stringutil.escapestr(res))
2994 3001 ui.status(_('remote output: %s\n') %
2995 3002 stringutil.escapestr(output))
2996 3003 else:
2997 3004 res = peer._call(command, **pycompat.strkwargs(args))
2998 3005 ui.status(_('response: %s\n') % stringutil.pprint(res))
2999 3006
3000 3007 elif action == 'batchbegin':
3001 3008 if batchedcommands is not None:
3002 3009 raise error.Abort(_('nested batchbegin not allowed'))
3003 3010
3004 3011 batchedcommands = []
3005 3012 elif action == 'batchsubmit':
3006 3013 # There is a batching API we could go through. But it would be
3007 3014 # difficult to normalize requests into function calls. It is easier
3008 3015 # to bypass this layer and normalize to commands + args.
3009 3016 ui.status(_('sending batch with %d sub-commands\n') %
3010 3017 len(batchedcommands))
3011 3018 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
3012 3019 ui.status(_('response #%d: %s\n') %
3013 3020 (i, stringutil.escapestr(chunk)))
3014 3021
3015 3022 batchedcommands = None
3016 3023
3017 3024 elif action.startswith('httprequest '):
3018 3025 if not opener:
3019 3026 raise error.Abort(_('cannot use httprequest without an HTTP '
3020 3027 'peer'))
3021 3028
3022 3029 request = action.split(' ', 2)
3023 3030 if len(request) != 3:
3024 3031 raise error.Abort(_('invalid httprequest: expected format is '
3025 3032 '"httprequest <method> <path>'))
3026 3033
3027 3034 method, httppath = request[1:]
3028 3035 headers = {}
3029 3036 body = None
3030 3037 frames = []
3031 3038 for line in lines:
3032 3039 line = line.lstrip()
3033 3040 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
3034 3041 if m:
3035 3042 headers[m.group(1)] = m.group(2)
3036 3043 continue
3037 3044
3038 3045 if line.startswith(b'BODYFILE '):
3039 3046 with open(line.split(b' ', 1)[1], 'rb') as fh:
3040 3047 body = fh.read()
3041 3048 elif line.startswith(b'frame '):
3042 3049 frame = wireprotoframing.makeframefromhumanstring(
3043 3050 line[len(b'frame '):])
3044 3051
3045 3052 frames.append(frame)
3046 3053 else:
3047 3054 raise error.Abort(_('unknown argument to httprequest: %s') %
3048 3055 line)
3049 3056
3050 3057 url = path + httppath
3051 3058
3052 3059 if frames:
3053 3060 body = b''.join(bytes(f) for f in frames)
3054 3061
3055 3062 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
3056 3063
3057 3064 # urllib.Request insists on using has_data() as a proxy for
3058 3065 # determining the request method. Override that to use our
3059 3066 # explicitly requested method.
3060 3067 req.get_method = lambda: method
3061 3068
3062 3069 try:
3063 3070 res = opener.open(req)
3064 3071 body = res.read()
3065 3072 except util.urlerr.urlerror as e:
3066 3073 e.read()
3067 3074 continue
3068 3075
3069 3076 if res.headers.get('Content-Type') == 'application/mercurial-cbor':
3070 3077 ui.write(_('cbor> %s\n') % stringutil.pprint(cbor.loads(body)))
3071 3078
3072 3079 elif action == 'close':
3073 3080 peer.close()
3074 3081 elif action == 'readavailable':
3075 3082 if not stdout or not stderr:
3076 3083 raise error.Abort(_('readavailable not available on this peer'))
3077 3084
3078 3085 stdin.close()
3079 3086 stdout.read()
3080 3087 stderr.read()
3081 3088
3082 3089 elif action == 'readline':
3083 3090 if not stdout:
3084 3091 raise error.Abort(_('readline not available on this peer'))
3085 3092 stdout.readline()
3086 3093 elif action == 'ereadline':
3087 3094 if not stderr:
3088 3095 raise error.Abort(_('ereadline not available on this peer'))
3089 3096 stderr.readline()
3090 3097 elif action.startswith('read '):
3091 3098 count = int(action.split(' ', 1)[1])
3092 3099 if not stdout:
3093 3100 raise error.Abort(_('read not available on this peer'))
3094 3101 stdout.read(count)
3095 3102 elif action.startswith('eread '):
3096 3103 count = int(action.split(' ', 1)[1])
3097 3104 if not stderr:
3098 3105 raise error.Abort(_('eread not available on this peer'))
3099 3106 stderr.read(count)
3100 3107 else:
3101 3108 raise error.Abort(_('unknown action: %s') % action)
3102 3109
3103 3110 if batchedcommands is not None:
3104 3111 raise error.Abort(_('unclosed "batchbegin" request'))
3105 3112
3106 3113 if peer:
3107 3114 peer.close()
3108 3115
3109 3116 if proc:
3110 3117 proc.kill()
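# For reference, a minimal sketch of an input script this command accepts,
# using only actions parsed above (the command names and arguments are
# illustrative, not an exhaustive reference):
#
#     command heads
#
#     batchbegin
#     command heads
#     command listkeys
#         namespace bookmarks
#     batchsubmit
#
#     httprequest GET ?cmd=capabilities
#         accept: application/mercurial-0.1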
@@ -1,2391 +1,2404
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 bin,
17 17 hex,
18 18 nullid,
19 19 )
20 20 from .thirdparty import (
21 21 attr,
22 22 )
23 23 from . import (
24 24 bookmarks as bookmod,
25 25 bundle2,
26 26 changegroup,
27 27 discovery,
28 28 error,
29 29 lock as lockmod,
30 30 logexchange,
31 31 obsolete,
32 32 phases,
33 33 pushkey,
34 34 pycompat,
35 35 scmutil,
36 36 sslutil,
37 37 streamclone,
38 38 url as urlmod,
39 39 util,
40 40 )
41 41 from .utils import (
42 42 stringutil,
43 43 )
44 44
45 45 urlerr = util.urlerr
46 46 urlreq = util.urlreq
47 47
48 48 # Maps bundle version human names to changegroup versions.
49 49 _bundlespeccgversions = {'v1': '01',
50 50 'v2': '02',
51 51 'packed1': 's1',
52 52 'bundle2': '02', #legacy
53 53 }
54 54
55 55 # Maps bundle version with content opts to choose which part to bundle
56 56 _bundlespeccontentopts = {
57 57 'v1': {
58 58 'changegroup': True,
59 59 'cg.version': '01',
60 60 'obsolescence': False,
61 61 'phases': False,
62 62 'tagsfnodescache': False,
63 63 'revbranchcache': False
64 64 },
65 65 'v2': {
66 66 'changegroup': True,
67 67 'cg.version': '02',
68 68 'obsolescence': False,
69 69 'phases': False,
70 70 'tagsfnodescache': True,
71 71 'revbranchcache': True
72 72 },
73 73 'packed1' : {
74 74 'cg.version': 's1'
75 75 }
76 76 }
77 77 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
78 78
79 79 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
80 80 "tagsfnodescache": False,
81 81 "revbranchcache": False}}
82 82
83 83 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
84 84 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
85 85
86 86 @attr.s
87 87 class bundlespec(object):
88 88 compression = attr.ib()
89 89 version = attr.ib()
90 90 params = attr.ib()
91 91 contentopts = attr.ib()
92 92
93 93 def parsebundlespec(repo, spec, strict=True, externalnames=False):
94 94 """Parse a bundle string specification into parts.
95 95
96 96 Bundle specifications denote a well-defined bundle/exchange format.
97 97 The content of a given specification should not change over time in
98 98 order to ensure that bundles produced by a newer version of Mercurial are
99 99 readable from an older version.
100 100
101 101 The string currently has the form:
102 102
103 103 <compression>-<type>[;<parameter0>[;<parameter1>]]
104 104
105 105 Where <compression> is one of the supported compression formats
106 106 and <type> is (currently) a version string. A ";" can follow the type and
107 107 all text afterwards is interpreted as URI encoded, ";" delimited key=value
108 108 pairs.
109 109
110 110 If ``strict`` is True (the default) <compression> is required. Otherwise,
111 111 it is optional.
112 112
113 113 If ``externalnames`` is False (the default), the human-centric names will
114 114 be converted to their internal representation.
115 115
116 116 Returns a bundlespec object of (compression, version, parameters).
117 117 Compression will be ``None`` if not in strict mode and a compression isn't
118 118 defined.
119 119
120 120 An ``InvalidBundleSpecification`` is raised when the specification is
121 121 not syntactically well formed.
122 122
123 123 An ``UnsupportedBundleSpecification`` is raised when the compression or
124 124 bundle type/version is not recognized.
125 125
126 126 Note: this function will likely eventually return a more complex data
127 127 structure, including bundle2 part information.
128 128 """
129 129 def parseparams(s):
130 130 if ';' not in s:
131 131 return s, {}
132 132
133 133 params = {}
134 134 version, paramstr = s.split(';', 1)
135 135
136 136 for p in paramstr.split(';'):
137 137 if '=' not in p:
138 138 raise error.InvalidBundleSpecification(
139 139 _('invalid bundle specification: '
140 140 'missing "=" in parameter: %s') % p)
141 141
142 142 key, value = p.split('=', 1)
143 143 key = urlreq.unquote(key)
144 144 value = urlreq.unquote(value)
145 145 params[key] = value
146 146
147 147 return version, params
148 148
149 149
150 150 if strict and '-' not in spec:
151 151 raise error.InvalidBundleSpecification(
152 152 _('invalid bundle specification; '
153 153 'must be prefixed with compression: %s') % spec)
154 154
155 155 if '-' in spec:
156 156 compression, version = spec.split('-', 1)
157 157
158 158 if compression not in util.compengines.supportedbundlenames:
159 159 raise error.UnsupportedBundleSpecification(
160 160 _('%s compression is not supported') % compression)
161 161
162 162 version, params = parseparams(version)
163 163
164 164 if version not in _bundlespeccgversions:
165 165 raise error.UnsupportedBundleSpecification(
166 166 _('%s is not a recognized bundle version') % version)
167 167 else:
168 168 # Value could be just the compression or just the version, in which
169 169 # case some defaults are assumed (but only when not in strict mode).
170 170 assert not strict
171 171
172 172 spec, params = parseparams(spec)
173 173
174 174 if spec in util.compengines.supportedbundlenames:
175 175 compression = spec
176 176 version = 'v1'
177 177 # Generaldelta repos require v2.
178 178 if 'generaldelta' in repo.requirements:
179 179 version = 'v2'
180 180 # Modern compression engines require v2.
181 181 if compression not in _bundlespecv1compengines:
182 182 version = 'v2'
183 183 elif spec in _bundlespeccgversions:
184 184 if spec == 'packed1':
185 185 compression = 'none'
186 186 else:
187 187 compression = 'bzip2'
188 188 version = spec
189 189 else:
190 190 raise error.UnsupportedBundleSpecification(
191 191 _('%s is not a recognized bundle specification') % spec)
192 192
193 193 # Bundle version 1 only supports a known set of compression engines.
194 194 if version == 'v1' and compression not in _bundlespecv1compengines:
195 195 raise error.UnsupportedBundleSpecification(
196 196 _('compression engine %s is not supported on v1 bundles') %
197 197 compression)
198 198
199 199 # The specification for packed1 can optionally declare the data formats
200 200 # required to apply it. If we see this metadata, compare against what the
201 201 # repo supports and error if the bundle isn't compatible.
202 202 if version == 'packed1' and 'requirements' in params:
203 203 requirements = set(params['requirements'].split(','))
204 204 missingreqs = requirements - repo.supportedformats
205 205 if missingreqs:
206 206 raise error.UnsupportedBundleSpecification(
207 207 _('missing support for repository features: %s') %
208 208 ', '.join(sorted(missingreqs)))
209 209
210 210 # Compute contentopts based on the version
211 211 contentopts = _bundlespeccontentopts.get(version, {}).copy()
212 212
213 213 # Process the variants
214 214 if "stream" in params and params["stream"] == "v2":
215 215 variant = _bundlespecvariants["streamv2"]
216 216 contentopts.update(variant)
217 217
218 218 if not externalnames:
219 219 engine = util.compengines.forbundlename(compression)
220 220 compression = engine.bundletype()[1]
221 221 version = _bundlespeccgversions[version]
222 222
223 223 return bundlespec(compression, version, params, contentopts)
224 224
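# A few illustrative inputs for parsebundlespec() (a sketch; the engine
# names actually available depend on the Mercurial build):
#
#     parsebundlespec(repo, 'gzip-v1')
#         -> bundlespec('GZ', '01', {}, <v1 content opts>)
#     parsebundlespec(repo, 'none-v2;stream=v2')
#         -> v2 content opts with the streamv2 variant applied
#     parsebundlespec(repo, 'gzip', strict=False)
#         -> compression 'gzip', version defaulted from the repo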
225 225 def readbundle(ui, fh, fname, vfs=None):
226 226 header = changegroup.readexactly(fh, 4)
227 227
228 228 alg = None
229 229 if not fname:
230 230 fname = "stream"
231 231 if not header.startswith('HG') and header.startswith('\0'):
232 232 fh = changegroup.headerlessfixup(fh, header)
233 233 header = "HG10"
234 234 alg = 'UN'
235 235 elif vfs:
236 236 fname = vfs.join(fname)
237 237
238 238 magic, version = header[0:2], header[2:4]
239 239
240 240 if magic != 'HG':
241 241 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
242 242 if version == '10':
243 243 if alg is None:
244 244 alg = changegroup.readexactly(fh, 2)
245 245 return changegroup.cg1unpacker(fh, alg)
246 246 elif version.startswith('2'):
247 247 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
248 248 elif version == 'S1':
249 249 return streamclone.streamcloneapplier(fh)
250 250 else:
251 251 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
252 252
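# Informally, the 4-byte header dispatched on above decodes as:
#
#     'HG10' + 2-byte compression ('BZ', 'GZ', 'UN')  -> changegroup v1
#     'HG2x' (e.g. 'HG20')                            -> bundle2
#     'HGS1'                                          -> stream clone bundle
#     leading '\0'                                    -> headerless cg1, 'UN'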
253 253 def getbundlespec(ui, fh):
254 254 """Infer the bundlespec from a bundle file handle.
255 255
256 256 The input file handle is seeked and the original seek position is not
257 257 restored.
258 258 """
259 259 def speccompression(alg):
260 260 try:
261 261 return util.compengines.forbundletype(alg).bundletype()[0]
262 262 except KeyError:
263 263 return None
264 264
265 265 b = readbundle(ui, fh, None)
266 266 if isinstance(b, changegroup.cg1unpacker):
267 267 alg = b._type
268 268 if alg == '_truncatedBZ':
269 269 alg = 'BZ'
270 270 comp = speccompression(alg)
271 271 if not comp:
272 272 raise error.Abort(_('unknown compression algorithm: %s') % alg)
273 273 return '%s-v1' % comp
274 274 elif isinstance(b, bundle2.unbundle20):
275 275 if 'Compression' in b.params:
276 276 comp = speccompression(b.params['Compression'])
277 277 if not comp:
278 278 raise error.Abort(_('unknown compression algorithm: %s') % comp)
279 279 else:
280 280 comp = 'none'
281 281
282 282 version = None
283 283 for part in b.iterparts():
284 284 if part.type == 'changegroup':
285 285 version = part.params['version']
286 286 if version in ('01', '02'):
287 287 version = 'v2'
288 288 else:
289 289 raise error.Abort(_('changegroup version %s does not have '
290 290 'a known bundlespec') % version,
291 291 hint=_('try upgrading your Mercurial '
292 292 'client'))
293 293 elif part.type == 'stream2' and version is None:
294 294 # A stream2 part must be part of a v2 bundle
295 295 version = "v2"
296 296 requirements = urlreq.unquote(part.params['requirements'])
297 297 splitted = requirements.split()
298 298 params = bundle2._formatrequirementsparams(splitted)
299 299 return 'none-v2;stream=v2;%s' % params
300 300
301 301 if not version:
302 302 raise error.Abort(_('could not identify changegroup version in '
303 303 'bundle'))
304 304
305 305 return '%s-%s' % (comp, version)
306 306 elif isinstance(b, streamclone.streamcloneapplier):
307 307 requirements = streamclone.readbundle1header(fh)[2]
308 308 formatted = bundle2._formatrequirementsparams(requirements)
309 309 return 'none-packed1;%s' % formatted
310 310 else:
311 311 raise error.Abort(_('unknown bundle type: %s') % b)
312 312
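# Example usage (a sketch): given a file written by ``hg bundle``,
#
#     with open('changes.hg', 'rb') as fh:
#         spec = getbundlespec(ui, fh)  # e.g. 'bzip2-v2' or 'none-packed1;...'
#
# the returned string is suitable as input to parsebundlespec().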
313 313 def _computeoutgoing(repo, heads, common):
314 314 """Computes which revs are outgoing given a set of common
315 315 and a set of heads.
316 316
317 317 This is a separate function so extensions can have access to
318 318 the logic.
319 319
320 320 Returns a discovery.outgoing object.
321 321 """
322 322 cl = repo.changelog
323 323 if common:
324 324 hasnode = cl.hasnode
325 325 common = [n for n in common if hasnode(n)]
326 326 else:
327 327 common = [nullid]
328 328 if not heads:
329 329 heads = cl.heads()
330 330 return discovery.outgoing(repo, common, heads)
331 331
332 332 def _forcebundle1(op):
333 333 """return true if a pull/push must use bundle1
334 334
335 335 This function is used to allow testing of the older bundle version"""
336 336 ui = op.repo.ui
337 337 # The goal of this config is to allow developers to choose the bundle
338 338 # version used during exchange. This is especially handy for tests.
339 339 # The value is a list of bundle versions to pick from; the highest
340 340 # version should be used.
341 341 #
342 342 # developer config: devel.legacy.exchange
343 343 exchange = ui.configlist('devel', 'legacy.exchange')
344 344 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
345 345 return forcebundle1 or not op.remote.capable('bundle2')
346 346
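# For example, a test can force the bundle1 path with (hgrc syntax):
#
#     [devel]
#     legacy.exchange = bundle1
#
# Listing 'bundle2' as well would take precedence and disable the
# fallback, per the check above.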
347 347 class pushoperation(object):
348 348 """A object that represent a single push operation
349 349
350 350 Its purpose is to carry push related state and very common operations.
351 351
352 352 A new pushoperation should be created at the beginning of each push and
353 353 discarded afterward.
354 354 """
355 355
356 356 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
357 357 bookmarks=(), pushvars=None):
358 358 # repo we push from
359 359 self.repo = repo
360 360 self.ui = repo.ui
361 361 # repo we push to
362 362 self.remote = remote
363 363 # force option provided
364 364 self.force = force
365 365 # revs to be pushed (None is "all")
366 366 self.revs = revs
368 368 # bookmarks explicitly pushed
368 368 self.bookmarks = bookmarks
369 369 # allow push of new branch
370 370 self.newbranch = newbranch
372 372 # steps already performed
373 373 # (used to check what steps have already been performed through bundle2)
373 373 self.stepsdone = set()
374 374 # Integer version of the changegroup push result
375 375 # - None means nothing to push
376 376 # - 0 means HTTP error
377 377 # - 1 means we pushed and remote head count is unchanged *or*
378 378 # we have outgoing changesets but refused to push
379 379 # - other values as described by addchangegroup()
380 380 self.cgresult = None
381 381 # Boolean value for the bookmark push
382 382 self.bkresult = None
383 383 # discover.outgoing object (contains common and outgoing data)
384 384 self.outgoing = None
385 385 # all remote topological heads before the push
386 386 self.remoteheads = None
387 387 # Details of the remote branch pre and post push
388 388 #
389 389 # mapping: {'branch': ([remoteheads],
390 390 # [newheads],
391 391 # [unsyncedheads],
392 392 # [discardedheads])}
393 393 # - branch: the branch name
394 394 # - remoteheads: the list of remote heads known locally
395 395 # None if the branch is new
396 396 # - newheads: the new remote heads (known locally) with outgoing pushed
397 397 # - unsyncedheads: the list of remote heads unknown locally.
398 398 # - discardedheads: the list of remote heads made obsolete by the push
399 399 self.pushbranchmap = None
400 400 # testable as a boolean indicating if any nodes are missing locally.
401 401 self.incoming = None
402 402 # summary of the remote phase situation
403 403 self.remotephases = None
404 404 # phase changes that must be pushed alongside the changesets
405 405 self.outdatedphases = None
406 406 # phase changes that must be pushed if the changeset push fails
407 407 self.fallbackoutdatedphases = None
408 408 # outgoing obsmarkers
409 409 self.outobsmarkers = set()
410 410 # outgoing bookmarks
411 411 self.outbookmarks = []
412 412 # transaction manager
413 413 self.trmanager = None
414 414 # map { pushkey partid -> callback handling failure}
415 415 # used to handle exception from mandatory pushkey part failure
416 416 self.pkfailcb = {}
417 417 # an iterable of pushvars or None
418 418 self.pushvars = pushvars
419 419
420 420 @util.propertycache
421 421 def futureheads(self):
422 422 """future remote heads if the changeset push succeeds"""
423 423 return self.outgoing.missingheads
424 424
425 425 @util.propertycache
426 426 def fallbackheads(self):
427 427 """future remote heads if the changeset push fails"""
428 428 if self.revs is None:
429 429 # no target to push, all common heads are relevant
430 430 return self.outgoing.commonheads
431 431 unfi = self.repo.unfiltered()
432 432 # I want cheads = heads(::missingheads and ::commonheads)
433 433 # (missingheads is revs with secret changeset filtered out)
434 434 #
435 435 # This can be expressed as:
436 436 # cheads = ( (missingheads and ::commonheads)
437 437 # + (commonheads and ::missingheads))
438 438 #
439 439 #
440 440 # while trying to push we already computed the following:
441 441 # common = (::commonheads)
442 442 # missing = ((commonheads::missingheads) - commonheads)
443 443 #
444 444 # We can pick:
445 445 # * missingheads part of common (::commonheads)
446 446 common = self.outgoing.common
447 447 nm = self.repo.changelog.nodemap
448 448 cheads = [node for node in self.revs if nm[node] in common]
449 449 # and
450 450 # * commonheads parents on missing
451 451 revset = unfi.set('%ln and parents(roots(%ln))',
452 452 self.outgoing.commonheads,
453 453 self.outgoing.missing)
454 454 cheads.extend(c.node() for c in revset)
455 455 return cheads
456 456
457 457 @property
458 458 def commonheads(self):
459 459 """set of all common heads after changeset bundle push"""
460 460 if self.cgresult:
461 461 return self.futureheads
462 462 else:
463 463 return self.fallbackheads
464 464
465 465 # mapping of message used when pushing bookmark
466 466 bookmsgmap = {'update': (_("updating bookmark %s\n"),
467 467 _('updating bookmark %s failed!\n')),
468 468 'export': (_("exporting bookmark %s\n"),
469 469 _('exporting bookmark %s failed!\n')),
470 470 'delete': (_("deleting remote bookmark %s\n"),
471 471 _('deleting remote bookmark %s failed!\n')),
472 472 }
473 473
474 474
475 475 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
476 476 opargs=None):
477 477 '''Push outgoing changesets (limited by revs) from a local
478 478 repository to remote. Return an integer:
479 479 - None means nothing to push
480 480 - 0 means HTTP error
481 481 - 1 means we pushed and remote head count is unchanged *or*
482 482 we have outgoing changesets but refused to push
483 483 - other values as described by addchangegroup()
484 484 '''
485 485 if opargs is None:
486 486 opargs = {}
487 487 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
488 488 **pycompat.strkwargs(opargs))
489 489 if pushop.remote.local():
490 490 missing = (set(pushop.repo.requirements)
491 491 - pushop.remote.local().supported)
492 492 if missing:
493 493 msg = _("required features are not"
494 494 " supported in the destination:"
495 495 " %s") % (', '.join(sorted(missing)))
496 496 raise error.Abort(msg)
497 497
498 498 if not pushop.remote.canpush():
499 499 raise error.Abort(_("destination does not support push"))
500 500
501 501 if not pushop.remote.capable('unbundle'):
502 502 raise error.Abort(_('cannot push: destination does not support the '
503 503 'unbundle wire protocol command'))
504 504
505 505 # get lock as we might write phase data
506 506 wlock = lock = None
507 507 try:
508 508 # bundle2 push may receive a reply bundle touching bookmarks or other
509 509 # things requiring the wlock. Take it now to ensure proper ordering.
510 510 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
511 511 if (not _forcebundle1(pushop)) and maypushback:
512 512 wlock = pushop.repo.wlock()
513 513 lock = pushop.repo.lock()
514 514 pushop.trmanager = transactionmanager(pushop.repo,
515 515 'push-response',
516 516 pushop.remote.url())
517 517 except IOError as err:
518 518 if err.errno != errno.EACCES:
519 519 raise
520 520 # source repo cannot be locked.
521 521 # We do not abort the push, but just disable the local phase
522 522 # synchronisation.
523 523 msg = 'cannot lock source repository: %s\n' % err
524 524 pushop.ui.debug(msg)
525 525
526 526 with wlock or util.nullcontextmanager(), \
527 527 lock or util.nullcontextmanager(), \
528 528 pushop.trmanager or util.nullcontextmanager():
529 529 pushop.repo.checkpush(pushop)
530 530 _pushdiscovery(pushop)
531 531 if not _forcebundle1(pushop):
532 532 _pushbundle2(pushop)
533 533 _pushchangeset(pushop)
534 534 _pushsyncphase(pushop)
535 535 _pushobsolete(pushop)
536 536 _pushbookmark(pushop)
537 537
538 538 return pushop
539 539
540 540 # list of steps to perform discovery before push
541 541 pushdiscoveryorder = []
542 542
543 543 # Mapping between step name and function
544 544 #
545 545 # This exists to help extensions wrap steps if necessary
546 546 pushdiscoverymapping = {}
547 547
548 548 def pushdiscovery(stepname):
549 549 """decorator for function performing discovery before push
550 550
551 551 The function is added to the step -> function mapping and appended to the
552 552 list of steps. Beware that decorated functions will be added in order (this
553 553 may matter).
554 554
555 555 You can only use this decorator for a new step, if you want to wrap a step
556 556 from an extension, change the pushdiscovery dictionary directly."""
557 557 def dec(func):
558 558 assert stepname not in pushdiscoverymapping
559 559 pushdiscoverymapping[stepname] = func
560 560 pushdiscoveryorder.append(stepname)
561 561 return func
562 562 return dec
563 563
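# A minimal sketch of an extension adding its own discovery step (the
# step name and function body here are hypothetical):
#
#     from mercurial import exchange
#
#     @exchange.pushdiscovery('mydata')
#     def _pushdiscoverymydata(pushop):
#         # runs after the built-in steps, in registration order
#         pushop.mydata = pushop.repo.ui.config('myext', 'data')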
564 564 def _pushdiscovery(pushop):
565 565 """Run all discovery steps"""
566 566 for stepname in pushdiscoveryorder:
567 567 step = pushdiscoverymapping[stepname]
568 568 step(pushop)
569 569
570 570 @pushdiscovery('changeset')
571 571 def _pushdiscoverychangeset(pushop):
572 572 """discover the changeset that need to be pushed"""
573 573 fci = discovery.findcommonincoming
574 574 if pushop.revs:
575 575 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
576 576 ancestorsof=pushop.revs)
577 577 else:
578 578 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
579 579 common, inc, remoteheads = commoninc
580 580 fco = discovery.findcommonoutgoing
581 581 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
582 582 commoninc=commoninc, force=pushop.force)
583 583 pushop.outgoing = outgoing
584 584 pushop.remoteheads = remoteheads
585 585 pushop.incoming = inc
586 586
587 587 @pushdiscovery('phase')
588 588 def _pushdiscoveryphase(pushop):
589 589 """discover the phase that needs to be pushed
590 590
591 591 (computed for both success and failure case for changesets push)"""
592 592 outgoing = pushop.outgoing
593 593 unfi = pushop.repo.unfiltered()
594 594 remotephases = pushop.remote.listkeys('phases')
595 595 if (pushop.ui.configbool('ui', '_usedassubrepo')
596 596 and remotephases # server supports phases
597 597 and not pushop.outgoing.missing # no changesets to be pushed
598 598 and remotephases.get('publishing', False)):
599 599 # When:
600 600 # - this is a subrepo push
601 601 # - and the remote supports phases
602 602 # - and no changesets are to be pushed
603 603 # - and the remote is publishing
604 604 # We may be in the issue 3781 case!
605 605 # We drop the phase synchronisation that would otherwise be done
606 606 # as a courtesy to publish changesets that are possibly still
607 607 # draft on the remote.
608 608 pushop.outdatedphases = []
609 609 pushop.fallbackoutdatedphases = []
610 610 return
611 611
612 612 pushop.remotephases = phases.remotephasessummary(pushop.repo,
613 613 pushop.fallbackheads,
614 614 remotephases)
615 615 droots = pushop.remotephases.draftroots
616 616
617 617 extracond = ''
618 618 if not pushop.remotephases.publishing:
619 619 extracond = ' and public()'
620 620 revset = 'heads((%%ln::%%ln) %s)' % extracond
621 621 # Get the list of all revs that are draft on the remote but public here.
622 622 # XXX Beware that the revset breaks if droots is not strictly
623 623 # XXX roots; we may want to ensure it is, but that is costly.
624 624 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
625 625 if not outgoing.missing:
626 626 future = fallback
627 627 else:
628 628 # adds changeset we are going to push as draft
629 629 #
630 630 # should not be necessary for a publishing server, but because of an
631 631 # issue fixed in xxxxx we have to do it anyway.
632 632 fdroots = list(unfi.set('roots(%ln + %ln::)',
633 633 outgoing.missing, droots))
634 634 fdroots = [f.node() for f in fdroots]
635 635 future = list(unfi.set(revset, fdroots, pushop.futureheads))
636 636 pushop.outdatedphases = future
637 637 pushop.fallbackoutdatedphases = fallback
638 638
639 639 @pushdiscovery('obsmarker')
640 640 def _pushdiscoveryobsmarkers(pushop):
641 641 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
642 642 and pushop.repo.obsstore
643 643 and 'obsolete' in pushop.remote.listkeys('namespaces')):
644 644 repo = pushop.repo
645 645 # Very naive computation that can be quite expensive on a big repo.
646 646 # However, evolution is currently slow on them anyway.
647 647 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
648 648 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
649 649
650 650 @pushdiscovery('bookmarks')
651 651 def _pushdiscoverybookmarks(pushop):
652 652 ui = pushop.ui
653 653 repo = pushop.repo.unfiltered()
654 654 remote = pushop.remote
655 655 ui.debug("checking for updated bookmarks\n")
656 656 ancestors = ()
657 657 if pushop.revs:
658 658 revnums = map(repo.changelog.rev, pushop.revs)
659 659 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
660 660 remotebookmark = remote.listkeys('bookmarks')
661 661
662 662 explicit = set([repo._bookmarks.expandname(bookmark)
663 663 for bookmark in pushop.bookmarks])
664 664
665 665 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
666 666 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
667 667
668 668 def safehex(x):
669 669 if x is None:
670 670 return x
671 671 return hex(x)
672 672
673 673 def hexifycompbookmarks(bookmarks):
674 674 return [(b, safehex(scid), safehex(dcid))
675 675 for (b, scid, dcid) in bookmarks]
676 676
677 677 comp = [hexifycompbookmarks(marks) for marks in comp]
678 678 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
679 679
680 680 def _processcompared(pushop, pushed, explicit, remotebms, comp):
681 681 """take decision on bookmark to pull from the remote bookmark
682 682
683 683 Exist to help extensions who want to alter this behavior.
684 684 """
685 685 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
686 686
687 687 repo = pushop.repo
688 688
689 689 for b, scid, dcid in advsrc:
690 690 if b in explicit:
691 691 explicit.remove(b)
692 692 if not pushed or repo[scid].rev() in pushed:
693 693 pushop.outbookmarks.append((b, dcid, scid))
694 694 # search added bookmark
695 695 for b, scid, dcid in addsrc:
696 696 if b in explicit:
697 697 explicit.remove(b)
698 698 pushop.outbookmarks.append((b, '', scid))
699 699 # search for overwritten bookmark
700 700 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
701 701 if b in explicit:
702 702 explicit.remove(b)
703 703 pushop.outbookmarks.append((b, dcid, scid))
704 704 # search for bookmark to delete
705 705 for b, scid, dcid in adddst:
706 706 if b in explicit:
707 707 explicit.remove(b)
708 708 # treat as "deleted locally"
709 709 pushop.outbookmarks.append((b, dcid, ''))
710 710 # identical bookmarks shouldn't get reported
711 711 for b, scid, dcid in same:
712 712 if b in explicit:
713 713 explicit.remove(b)
714 714
715 715 if explicit:
716 716 explicit = sorted(explicit)
717 717 # we should probably list all of them
718 718 pushop.ui.warn(_('bookmark %s does not exist on the local '
719 719 'or remote repository!\n') % explicit[0])
720 720 pushop.bkresult = 2
721 721
722 722 pushop.outbookmarks.sort()
723 723
724 724 def _pushcheckoutgoing(pushop):
725 725 outgoing = pushop.outgoing
726 726 unfi = pushop.repo.unfiltered()
727 727 if not outgoing.missing:
728 728 # nothing to push
729 729 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
730 730 return False
731 731 # something to push
732 732 if not pushop.force:
733 733 # if repo.obsstore is falsy --> no obsolete markers,
734 734 # so we can skip the iteration
735 735 if unfi.obsstore:
736 736 # these messages are defined here to stay within the 80-char limit
737 737 mso = _("push includes obsolete changeset: %s!")
738 738 mspd = _("push includes phase-divergent changeset: %s!")
739 739 mscd = _("push includes content-divergent changeset: %s!")
740 740 mst = {"orphan": _("push includes orphan changeset: %s!"),
741 741 "phase-divergent": mspd,
742 742 "content-divergent": mscd}
743 743 # If we are pushing and there is at least one
744 744 # obsolete or unstable changeset in missing, at
745 745 # least one of the missing heads will be obsolete or
746 746 # unstable. So checking heads only is OK.
747 747 for node in outgoing.missingheads:
748 748 ctx = unfi[node]
749 749 if ctx.obsolete():
750 750 raise error.Abort(mso % ctx)
751 751 elif ctx.isunstable():
752 752 # TODO print more than one instability in the abort
753 753 # message
754 754 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
755 755
756 756 discovery.checkheads(pushop)
757 757 return True
758 758
759 759 # List of names of steps to perform for an outgoing bundle2, order matters.
760 760 b2partsgenorder = []
761 761
762 762 # Mapping between step name and function
763 763 #
764 764 # This exists to help extensions wrap steps if necessary
765 765 b2partsgenmapping = {}
766 766
767 767 def b2partsgenerator(stepname, idx=None):
768 768 """decorator for function generating bundle2 part
769 769
770 770 The function is added to the step -> function mapping and appended to the
771 771 list of steps. Beware that decorated functions will be added in order
772 772 (this may matter).
773 773
774 774 You can only use this decorator for new steps, if you want to wrap a step
775 775 from an extension, change the b2partsgenmapping dictionary directly."""
776 776 def dec(func):
777 777 assert stepname not in b2partsgenmapping
778 778 b2partsgenmapping[stepname] = func
779 779 if idx is None:
780 780 b2partsgenorder.append(stepname)
781 781 else:
782 782 b2partsgenorder.insert(idx, stepname)
783 783 return func
784 784 return dec
785 785
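# Likewise for part generators, where the optional ``idx`` splices the
# step in at a given position (hypothetical name):
#
#     @exchange.b2partsgenerator('mypart', idx=0)
#     def _pushb2mypart(pushop, bundler):
#         ...  # runs before all other part generators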
786 786 def _pushb2ctxcheckheads(pushop, bundler):
787 787 """Generate race condition checking parts
788 788
789 789 Exists as an independent function to aid extensions
790 790 """
791 791 # * 'force' does not check for push races,
792 792 # * if we don't push anything, there is nothing to check.
793 793 if not pushop.force and pushop.outgoing.missingheads:
794 794 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
795 795 emptyremote = pushop.pushbranchmap is None
796 796 if not allowunrelated or emptyremote:
797 797 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
798 798 else:
799 799 affected = set()
800 800 for branch, heads in pushop.pushbranchmap.iteritems():
801 801 remoteheads, newheads, unsyncedheads, discardedheads = heads
802 802 if remoteheads is not None:
803 803 remote = set(remoteheads)
804 804 affected |= set(discardedheads) & remote
805 805 affected |= remote - set(newheads)
806 806 if affected:
807 807 data = iter(sorted(affected))
808 808 bundler.newpart('check:updated-heads', data=data)
809 809
810 810 def _pushing(pushop):
811 811 """return True if we are pushing anything"""
812 812 return bool(pushop.outgoing.missing
813 813 or pushop.outdatedphases
814 814 or pushop.outobsmarkers
815 815 or pushop.outbookmarks)
816 816
817 817 @b2partsgenerator('check-bookmarks')
818 818 def _pushb2checkbookmarks(pushop, bundler):
819 819 """insert bookmark move checking"""
820 820 if not _pushing(pushop) or pushop.force:
821 821 return
822 822 b2caps = bundle2.bundle2caps(pushop.remote)
823 823 hasbookmarkcheck = 'bookmarks' in b2caps
824 824 if not (pushop.outbookmarks and hasbookmarkcheck):
825 825 return
826 826 data = []
827 827 for book, old, new in pushop.outbookmarks:
828 828 old = bin(old)
829 829 data.append((book, old))
830 830 checkdata = bookmod.binaryencode(data)
831 831 bundler.newpart('check:bookmarks', data=checkdata)
832 832
833 833 @b2partsgenerator('check-phases')
834 834 def _pushb2checkphases(pushop, bundler):
835 835 """insert phase move checking"""
836 836 if not _pushing(pushop) or pushop.force:
837 837 return
838 838 b2caps = bundle2.bundle2caps(pushop.remote)
839 839 hasphaseheads = 'heads' in b2caps.get('phases', ())
840 840 if pushop.remotephases is not None and hasphaseheads:
841 841 # check that the remote phase has not changed
842 842 checks = [[] for p in phases.allphases]
843 843 checks[phases.public].extend(pushop.remotephases.publicheads)
844 844 checks[phases.draft].extend(pushop.remotephases.draftroots)
845 845 if any(checks):
846 846 for nodes in checks:
847 847 nodes.sort()
848 848 checkdata = phases.binaryencode(checks)
849 849 bundler.newpart('check:phases', data=checkdata)
850 850
851 851 @b2partsgenerator('changeset')
852 852 def _pushb2ctx(pushop, bundler):
853 853 """handle changegroup push through bundle2
854 854
855 855 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
856 856 """
857 857 if 'changesets' in pushop.stepsdone:
858 858 return
859 859 pushop.stepsdone.add('changesets')
860 860 # Send known heads to the server for race detection.
861 861 if not _pushcheckoutgoing(pushop):
862 862 return
863 863 pushop.repo.prepushoutgoinghooks(pushop)
864 864
865 865 _pushb2ctxcheckheads(pushop, bundler)
866 866
867 867 b2caps = bundle2.bundle2caps(pushop.remote)
868 868 version = '01'
869 869 cgversions = b2caps.get('changegroup')
870 870 if cgversions: # 3.1 and 3.2 ship with an empty value
871 871 cgversions = [v for v in cgversions
872 872 if v in changegroup.supportedoutgoingversions(
873 873 pushop.repo)]
874 874 if not cgversions:
875 875 raise ValueError(_('no common changegroup version'))
876 876 version = max(cgversions)
877 877 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
878 878 'push')
879 879 cgpart = bundler.newpart('changegroup', data=cgstream)
880 880 if cgversions:
881 881 cgpart.addparam('version', version)
882 882 if 'treemanifest' in pushop.repo.requirements:
883 883 cgpart.addparam('treemanifest', '1')
884 884 def handlereply(op):
885 885 """extract addchangegroup returns from server reply"""
886 886 cgreplies = op.records.getreplies(cgpart.id)
887 887 assert len(cgreplies['changegroup']) == 1
888 888 pushop.cgresult = cgreplies['changegroup'][0]['return']
889 889 return handlereply
890 890
891 891 @b2partsgenerator('phase')
892 892 def _pushb2phases(pushop, bundler):
893 893 """handle phase push through bundle2"""
894 894 if 'phases' in pushop.stepsdone:
895 895 return
896 896 b2caps = bundle2.bundle2caps(pushop.remote)
897 897 ui = pushop.repo.ui
898 898
899 899 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
900 900 haspushkey = 'pushkey' in b2caps
901 901 hasphaseheads = 'heads' in b2caps.get('phases', ())
902 902
903 903 if hasphaseheads and not legacyphase:
904 904 return _pushb2phaseheads(pushop, bundler)
905 905 elif haspushkey:
906 906 return _pushb2phasespushkey(pushop, bundler)
907 907
908 908 def _pushb2phaseheads(pushop, bundler):
909 909 """push phase information through a bundle2 - binary part"""
910 910 pushop.stepsdone.add('phases')
911 911 if pushop.outdatedphases:
912 912 updates = [[] for p in phases.allphases]
913 913 updates[0].extend(h.node() for h in pushop.outdatedphases)
914 914 phasedata = phases.binaryencode(updates)
915 915 bundler.newpart('phase-heads', data=phasedata)
916 916
917 917 def _pushb2phasespushkey(pushop, bundler):
918 918 """push phase information through a bundle2 - pushkey part"""
919 919 pushop.stepsdone.add('phases')
920 920 part2node = []
921 921
922 922 def handlefailure(pushop, exc):
923 923 targetid = int(exc.partid)
924 924 for partid, node in part2node:
925 925 if partid == targetid:
926 926 raise error.Abort(_('updating %s to public failed') % node)
927 927
928 928 enc = pushkey.encode
929 929 for newremotehead in pushop.outdatedphases:
930 930 part = bundler.newpart('pushkey')
931 931 part.addparam('namespace', enc('phases'))
932 932 part.addparam('key', enc(newremotehead.hex()))
933 933 part.addparam('old', enc('%d' % phases.draft))
934 934 part.addparam('new', enc('%d' % phases.public))
935 935 part2node.append((part.id, newremotehead))
936 936 pushop.pkfailcb[part.id] = handlefailure
937 937
938 938 def handlereply(op):
939 939 for partid, node in part2node:
940 940 partrep = op.records.getreplies(partid)
941 941 results = partrep['pushkey']
942 942 assert len(results) <= 1
943 943 msg = None
944 944 if not results:
945 945 msg = _('server ignored update of %s to public!\n') % node
946 946 elif not int(results[0]['return']):
947 947 msg = _('updating %s to public failed!\n') % node
948 948 if msg is not None:
949 949 pushop.ui.warn(msg)
950 950 return handlereply
951 951
952 952 @b2partsgenerator('obsmarkers')
953 953 def _pushb2obsmarkers(pushop, bundler):
954 954 if 'obsmarkers' in pushop.stepsdone:
955 955 return
956 956 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
957 957 if obsolete.commonversion(remoteversions) is None:
958 958 return
959 959 pushop.stepsdone.add('obsmarkers')
960 960 if pushop.outobsmarkers:
961 961 markers = sorted(pushop.outobsmarkers)
962 962 bundle2.buildobsmarkerspart(bundler, markers)
963 963
964 964 @b2partsgenerator('bookmarks')
965 965 def _pushb2bookmarks(pushop, bundler):
966 966 """handle bookmark push through bundle2"""
967 967 if 'bookmarks' in pushop.stepsdone:
968 968 return
969 969 b2caps = bundle2.bundle2caps(pushop.remote)
970 970
971 971 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
972 972 legacybooks = 'bookmarks' in legacy
973 973
974 974 if not legacybooks and 'bookmarks' in b2caps:
975 975 return _pushb2bookmarkspart(pushop, bundler)
976 976 elif 'pushkey' in b2caps:
977 977 return _pushb2bookmarkspushkey(pushop, bundler)
978 978
979 979 def _bmaction(old, new):
980 980 """small utility for bookmark pushing"""
981 981 if not old:
982 982 return 'export'
983 983 elif not new:
984 984 return 'delete'
985 985 return 'update'
986 986
987 987 def _pushb2bookmarkspart(pushop, bundler):
988 988 pushop.stepsdone.add('bookmarks')
989 989 if not pushop.outbookmarks:
990 990 return
991 991
992 992 allactions = []
993 993 data = []
994 994 for book, old, new in pushop.outbookmarks:
995 995 new = bin(new)
996 996 data.append((book, new))
997 997 allactions.append((book, _bmaction(old, new)))
998 998 checkdata = bookmod.binaryencode(data)
999 999 bundler.newpart('bookmarks', data=checkdata)
1000 1000
1001 1001 def handlereply(op):
1002 1002 ui = pushop.ui
1003 1003 # if success
1004 1004 for book, action in allactions:
1005 1005 ui.status(bookmsgmap[action][0] % book)
1006 1006
1007 1007 return handlereply
1008 1008
1009 1009 def _pushb2bookmarkspushkey(pushop, bundler):
1010 1010 pushop.stepsdone.add('bookmarks')
1011 1011 part2book = []
1012 1012 enc = pushkey.encode
1013 1013
1014 1014 def handlefailure(pushop, exc):
1015 1015 targetid = int(exc.partid)
1016 1016 for partid, book, action in part2book:
1017 1017 if partid == targetid:
1018 1018 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1019 1019 # we should not be called for parts we did not generate
1020 1020 assert False
1021 1021
1022 1022 for book, old, new in pushop.outbookmarks:
1023 1023 part = bundler.newpart('pushkey')
1024 1024 part.addparam('namespace', enc('bookmarks'))
1025 1025 part.addparam('key', enc(book))
1026 1026 part.addparam('old', enc(old))
1027 1027 part.addparam('new', enc(new))
1028 1028 action = 'update'
1029 1029 if not old:
1030 1030 action = 'export'
1031 1031 elif not new:
1032 1032 action = 'delete'
1033 1033 part2book.append((part.id, book, action))
1034 1034 pushop.pkfailcb[part.id] = handlefailure
1035 1035
1036 1036 def handlereply(op):
1037 1037 ui = pushop.ui
1038 1038 for partid, book, action in part2book:
1039 1039 partrep = op.records.getreplies(partid)
1040 1040 results = partrep['pushkey']
1041 1041 assert len(results) <= 1
1042 1042 if not results:
1043 1043 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
1044 1044 else:
1045 1045 ret = int(results[0]['return'])
1046 1046 if ret:
1047 1047 ui.status(bookmsgmap[action][0] % book)
1048 1048 else:
1049 1049 ui.warn(bookmsgmap[action][1] % book)
1050 1050 if pushop.bkresult is not None:
1051 1051 pushop.bkresult = 1
1052 1052 return handlereply
1053 1053
1054 1054 @b2partsgenerator('pushvars', idx=0)
1055 1055 def _getbundlesendvars(pushop, bundler):
1056 1056 '''send shellvars via bundle2'''
1057 1057 pushvars = pushop.pushvars
1058 1058 if pushvars:
1059 1059 shellvars = {}
1060 1060 for raw in pushvars:
1061 1061 if '=' not in raw:
1062 1062 msg = ("unable to parse variable '%s', should follow "
1063 1063 "'KEY=VALUE' or 'KEY=' format")
1064 1064 raise error.Abort(msg % raw)
1065 1065 k, v = raw.split('=', 1)
1066 1066 shellvars[k] = v
1067 1067
1068 1068 part = bundler.newpart('pushvars')
1069 1069
1070 1070 for key, value in shellvars.iteritems():
1071 1071 part.addparam(key, value, mandatory=False)
1072 1072
1073 1073 def _pushbundle2(pushop):
1074 1074 """push data to the remote using bundle2
1075 1075
1076 1076 The only currently supported type of data is changegroup but this will
1077 1077 evolve in the future."""
1078 1078 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1079 1079 pushback = (pushop.trmanager
1080 1080 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1081 1081
1082 1082 # create reply capability
1083 1083 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1084 1084 allowpushback=pushback,
1085 1085 role='client'))
1086 1086 bundler.newpart('replycaps', data=capsblob)
1087 1087 replyhandlers = []
1088 1088 for partgenname in b2partsgenorder:
1089 1089 partgen = b2partsgenmapping[partgenname]
1090 1090 ret = partgen(pushop, bundler)
1091 1091 if callable(ret):
1092 1092 replyhandlers.append(ret)
1093 1093 # do not push if nothing to push
1094 1094 if bundler.nbparts <= 1:
1095 1095 return
1096 1096 stream = util.chunkbuffer(bundler.getchunks())
1097 1097 try:
1098 1098 try:
1099 1099 with pushop.remote.commandexecutor() as e:
1100 1100 reply = e.callcommand('unbundle', {
1101 1101 'bundle': stream,
1102 1102 'heads': ['force'],
1103 1103 'url': pushop.remote.url(),
1104 1104 }).result()
1105 1105 except error.BundleValueError as exc:
1106 1106 raise error.Abort(_('missing support for %s') % exc)
1107 1107 try:
1108 1108 trgetter = None
1109 1109 if pushback:
1110 1110 trgetter = pushop.trmanager.transaction
1111 1111 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1112 1112 except error.BundleValueError as exc:
1113 1113 raise error.Abort(_('missing support for %s') % exc)
1114 1114 except bundle2.AbortFromPart as exc:
1115 1115 pushop.ui.status(_('remote: %s\n') % exc)
1116 1116 if exc.hint is not None:
1117 1117 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1118 1118 raise error.Abort(_('push failed on remote'))
1119 1119 except error.PushkeyFailed as exc:
1120 1120 partid = int(exc.partid)
1121 1121 if partid not in pushop.pkfailcb:
1122 1122 raise
1123 1123 pushop.pkfailcb[partid](pushop, exc)
1124 1124 for rephand in replyhandlers:
1125 1125 rephand(op)
1126 1126
1127 1127 def _pushchangeset(pushop):
1128 1128 """Make the actual push of changeset bundle to remote repo"""
1129 1129 if 'changesets' in pushop.stepsdone:
1130 1130 return
1131 1131 pushop.stepsdone.add('changesets')
1132 1132 if not _pushcheckoutgoing(pushop):
1133 1133 return
1134 1134
1135 1135 # Should have verified this in push().
1136 1136 assert pushop.remote.capable('unbundle')
1137 1137
1138 1138 pushop.repo.prepushoutgoinghooks(pushop)
1139 1139 outgoing = pushop.outgoing
1140 1140 # TODO: get bundlecaps from remote
1141 1141 bundlecaps = None
1142 1142 # create a changegroup from local
1143 1143 if pushop.revs is None and not (outgoing.excluded
1144 1144 or pushop.repo.changelog.filteredrevs):
1145 1145 # push everything,
1146 1146 # use the fast path, no race possible on push
1147 1147 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1148 1148 fastpath=True, bundlecaps=bundlecaps)
1149 1149 else:
1150 1150 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1151 1151 'push', bundlecaps=bundlecaps)
1152 1152
1153 1153 # apply changegroup to remote
1154 1154 # local repo finds heads on server, finds out what
1155 1155 # revs it must push. once revs transferred, if server
1156 1156 # finds it has different heads (someone else won
1157 1157 # commit/push race), server aborts.
1158 1158 if pushop.force:
1159 1159 remoteheads = ['force']
1160 1160 else:
1161 1161 remoteheads = pushop.remoteheads
1162 1162 # ssh: return remote's addchangegroup()
1163 1163 # http: return remote's addchangegroup() or 0 for error
1164 1164 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1165 1165 pushop.repo.url())
1166 1166
1167 1167 def _pushsyncphase(pushop):
1168 1168 """synchronise phase information locally and remotely"""
1169 1169 cheads = pushop.commonheads
1170 1170 # even when we don't push, exchanging phase data is useful
1171 1171 remotephases = pushop.remote.listkeys('phases')
1172 1172 if (pushop.ui.configbool('ui', '_usedassubrepo')
1173 1173 and remotephases # server supports phases
1174 1174 and pushop.cgresult is None # nothing was pushed
1175 1175 and remotephases.get('publishing', False)):
1176 1176 # When:
1177 1177 # - this is a subrepo push
1178 1178 # - and the remote supports phases
1179 1179 # - and no changeset was pushed
1180 1180 # - and the remote is publishing
1181 1181 # We may be in the issue 3871 case!
1182 1182 # We drop the phase synchronisation that would otherwise be done
1183 1183 # as a courtesy to publish changesets that are possibly still
1184 1184 # draft on the remote.
1185 1185 remotephases = {'publishing': 'True'}
1186 1186 if not remotephases: # old server or public-only reply from a non-publishing one
1187 1187 _localphasemove(pushop, cheads)
1188 1188 # don't push any phase data as there is nothing to push
1189 1189 else:
1190 1190 ana = phases.analyzeremotephases(pushop.repo, cheads,
1191 1191 remotephases)
1192 1192 pheads, droots = ana
1193 1193 ### Apply remote phase on local
1194 1194 if remotephases.get('publishing', False):
1195 1195 _localphasemove(pushop, cheads)
1196 1196 else: # publish = False
1197 1197 _localphasemove(pushop, pheads)
1198 1198 _localphasemove(pushop, cheads, phases.draft)
1199 1199 ### Apply local phase on remote
1200 1200
1201 1201 if pushop.cgresult:
1202 1202 if 'phases' in pushop.stepsdone:
1203 1203 # phases already pushed though bundle2
1204 1204 return
1205 1205 outdated = pushop.outdatedphases
1206 1206 else:
1207 1207 outdated = pushop.fallbackoutdatedphases
1208 1208
1209 1209 pushop.stepsdone.add('phases')
1210 1210
1211 1211 # filter heads already turned public by the push
1212 1212 outdated = [c for c in outdated if c.node() not in pheads]
1213 1213 # fallback to independent pushkey command
1214 1214 for newremotehead in outdated:
1215 r = pushop.remote.pushkey('phases',
1216 newremotehead.hex(),
1217 ('%d' % phases.draft),
1218 ('%d' % phases.public))
1215 with pushop.remote.commandexecutor() as e:
1216 r = e.callcommand('pushkey', {
1217 'namespace': 'phases',
1218 'key': newremotehead.hex(),
1219 'old': '%d' % phases.draft,
1220 'new': '%d' % phases.public
1221 }).result()
1222
1219 1223 if not r:
1220 1224 pushop.ui.warn(_('updating %s to public failed!\n')
1221 1225 % newremotehead)
1222 1226
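# ``commandexecutor`` (used above) is the generic peer-command API:
# ``callcommand()`` returns a future, and an executor may batch several
# calls into one round trip. A sketch against any peer (command choices
# are illustrative):
#
#     with remote.commandexecutor() as e:
#         f1 = e.callcommand('lookup', {'key': 'tip'})
#         f2 = e.callcommand('listkeys', {'namespace': 'phases'})
#         node, keys = f1.result(), f2.result()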
1223 1227 def _localphasemove(pushop, nodes, phase=phases.public):
1224 1228 """move <nodes> to <phase> in the local source repo"""
1225 1229 if pushop.trmanager:
1226 1230 phases.advanceboundary(pushop.repo,
1227 1231 pushop.trmanager.transaction(),
1228 1232 phase,
1229 1233 nodes)
1230 1234 else:
1231 1235 # repo is not locked, do not change any phases!
1232 1236 # Informs the user that phases should have been moved when
1233 1237 # applicable.
1234 1238 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1235 1239 phasestr = phases.phasenames[phase]
1236 1240 if actualmoves:
1237 1241 pushop.ui.status(_('cannot lock source repo, skipping '
1238 1242 'local %s phase update\n') % phasestr)
1239 1243
1240 1244 def _pushobsolete(pushop):
1241 1245 """utility function to push obsolete markers to a remote"""
1242 1246 if 'obsmarkers' in pushop.stepsdone:
1243 1247 return
1244 1248 repo = pushop.repo
1245 1249 remote = pushop.remote
1246 1250 pushop.stepsdone.add('obsmarkers')
1247 1251 if pushop.outobsmarkers:
1248 1252 pushop.ui.debug('try to push obsolete markers to remote\n')
1249 1253 rslts = []
1250 1254 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1251 1255 for key in sorted(remotedata, reverse=True):
1252 1256 # reverse sort to ensure we end with dump0
1253 1257 data = remotedata[key]
1254 1258 rslts.append(remote.pushkey('obsolete', key, '', data))
1255 1259 if [r for r in rslts if not r]:
1256 1260 msg = _('failed to push some obsolete markers!\n')
1257 1261 repo.ui.warn(msg)
1258 1262
1259 1263 def _pushbookmark(pushop):
1260 1264 """Update bookmark position on remote"""
1261 1265 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1262 1266 return
1263 1267 pushop.stepsdone.add('bookmarks')
1264 1268 ui = pushop.ui
1265 1269 remote = pushop.remote
1266 1270
1267 1271 for b, old, new in pushop.outbookmarks:
1268 1272 action = 'update'
1269 1273 if not old:
1270 1274 action = 'export'
1271 1275 elif not new:
1272 1276 action = 'delete'
1273 if remote.pushkey('bookmarks', b, old, new):
1277
1278 with remote.commandexecutor() as e:
1279 r = e.callcommand('pushkey', {
1280 'namespace': 'bookmarks',
1281 'key': b,
1282 'old': old,
1283 'new': new,
1284 }).result()
1285
1286 if r:
1274 1287 ui.status(bookmsgmap[action][0] % b)
1275 1288 else:
1276 1289 ui.warn(bookmsgmap[action][1] % b)
1277 1290 # discovery may have set the value from an invalid entry
1278 1291 if pushop.bkresult is not None:
1279 1292 pushop.bkresult = 1
1280 1293
1281 1294 class pulloperation(object):
1282 1295 """A object that represent a single pull operation
1283 1296
1284 1297 It purpose is to carry pull related state and very common operation.
1285 1298
1286 1299 A new should be created at the beginning of each pull and discarded
1287 1300 afterward.
1288 1301 """
1289 1302
1290 1303 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1291 1304 remotebookmarks=None, streamclonerequested=None):
1292 1305 # repo we pull into
1293 1306 self.repo = repo
1294 1307 # repo we pull from
1295 1308 self.remote = remote
1296 1309 # revision we try to pull (None is "all")
1297 1310 self.heads = heads
1298 1311 # bookmark pulled explicitly
1299 1312 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1300 1313 for bookmark in bookmarks]
1301 1314 # do we force pull?
1302 1315 self.force = force
1303 1316 # whether a streaming clone was requested
1304 1317 self.streamclonerequested = streamclonerequested
1305 1318 # transaction manager
1306 1319 self.trmanager = None
1307 1320 # set of common changesets between local and remote before pull
1308 1321 self.common = None
1309 1322 # set of pulled heads
1310 1323 self.rheads = None
1311 1324 # list of missing changesets to fetch remotely
1312 1325 self.fetch = None
1313 1326 # remote bookmarks data
1314 1327 self.remotebookmarks = remotebookmarks
1315 1328 # result of changegroup pulling (used as return code by pull)
1316 1329 self.cgresult = None
1317 1330 # list of steps already done
1318 1331 self.stepsdone = set()
1319 1332 # Whether we attempted a clone from pre-generated bundles.
1320 1333 self.clonebundleattempted = False
1321 1334
1322 1335 @util.propertycache
1323 1336 def pulledsubset(self):
1324 1337 """heads of the set of changeset target by the pull"""
1325 1338 # compute target subset
1326 1339 if self.heads is None:
1327 1340 # We pulled everything possible
1328 1341 # sync on everything common
1329 1342 c = set(self.common)
1330 1343 ret = list(self.common)
1331 1344 for n in self.rheads:
1332 1345 if n not in c:
1333 1346 ret.append(n)
1334 1347 return ret
1335 1348 else:
1336 1349 # We pulled a specific subset
1337 1350 # sync on this subset
1338 1351 return self.heads
1339 1352
1340 1353 @util.propertycache
1341 1354 def canusebundle2(self):
1342 1355 return not _forcebundle1(self)
1343 1356
1344 1357 @util.propertycache
1345 1358 def remotebundle2caps(self):
1346 1359 return bundle2.bundle2caps(self.remote)
1347 1360
1348 1361 def gettransaction(self):
1349 1362 # deprecated; talk to trmanager directly
1350 1363 return self.trmanager.transaction()
1351 1364
1352 1365 class transactionmanager(util.transactional):
1353 1366 """An object to manage the life cycle of a transaction
1354 1367
1355 1368 It creates the transaction on demand and calls the appropriate hooks when
1356 1369 closing the transaction."""
1357 1370 def __init__(self, repo, source, url):
1358 1371 self.repo = repo
1359 1372 self.source = source
1360 1373 self.url = url
1361 1374 self._tr = None
1362 1375
1363 1376 def transaction(self):
1364 1377 """Return an open transaction object, constructing if necessary"""
1365 1378 if not self._tr:
1366 1379 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1367 1380 self._tr = self.repo.transaction(trname)
1368 1381 self._tr.hookargs['source'] = self.source
1369 1382 self._tr.hookargs['url'] = self.url
1370 1383 return self._tr
1371 1384
1372 1385 def close(self):
1373 1386 """close transaction if created"""
1374 1387 if self._tr is not None:
1375 1388 self._tr.close()
1376 1389
1377 1390 def release(self):
1378 1391 """release transaction if created"""
1379 1392 if self._tr is not None:
1380 1393 self._tr.release()
1381 1394
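# Illustrative (hypothetical) use of transactionmanager as a context
# manager; util.transactional is assumed to supply __enter__/__exit__ so
# that close() runs on a clean exit and release() runs in all cases:
#
#   tm = transactionmanager(repo, 'pull', remote.url())
#   with tm:
#       tr = tm.transaction()  # created lazily on first call
#       # ... apply incoming data under the transaction ...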
1382 1395 def _fullpullbundle2(repo, pullop):
1383 1396 # The server may send a partial reply, i.e. when inlining
1384 1397 # pre-computed bundles. In that case, update the common
1385 1398 # set based on the results and pull another bundle.
1386 1399 #
1387 1400 # There are two indicators that the process is finished:
1388 1401 # - no changeset has been added, or
1389 1402 # - all remote heads are known locally.
1390 1403 # The head check must use the unfiltered view as obsolescence
1391 1404 # markers can hide heads.
1392 1405 unfi = repo.unfiltered()
1393 1406 unficl = unfi.changelog
1394 1407 def headsofdiff(h1, h2):
1395 1408 """Returns heads(h1 % h2)"""
1396 1409 res = unfi.set('heads(%ln %% %ln)', h1, h2)
1397 1410 return set(ctx.node() for ctx in res)
1398 1411 def headsofunion(h1, h2):
1399 1412 """Returns heads((h1 + h2) - null)"""
1400 1413 res = unfi.set('heads((%ln + %ln - null))', h1, h2)
1401 1414 return set(ctx.node() for ctx in res)
1402 1415 while True:
1403 1416 old_heads = unficl.heads()
1404 1417 clstart = len(unficl)
1405 1418 _pullbundle2(pullop)
1406 1419 if changegroup.NARROW_REQUIREMENT in repo.requirements:
1407 1420 # XXX narrow clones filter the heads on the server side during
1408 1421 # XXX getbundle and result in partial replies as well.
1409 1422 # XXX Disable pull bundles in this case as band aid to avoid
1410 1423 # XXX extra round trips.
1411 1424 break
1412 1425 if clstart == len(unficl):
1413 1426 break
1414 1427 if all(unficl.hasnode(n) for n in pullop.rheads):
1415 1428 break
1416 1429 new_heads = headsofdiff(unficl.heads(), old_heads)
1417 1430 pullop.common = headsofunion(new_heads, pullop.common)
1418 1431 pullop.rheads = set(pullop.rheads) - pullop.common
1419 1432
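# Worked sketch of the loop above (nodes hypothetical): after a partial
# reply, headsofdiff(unficl.heads(), old_heads) evaluates the revset
# heads(new % old), i.e. the heads among changesets added this iteration;
# folding those into pullop.common shrinks the next getbundle request
# until nothing new arrives or all remote heads are known locally.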
1420 1433 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1421 1434 streamclonerequested=None):
1422 1435 """Fetch repository data from a remote.
1423 1436
1424 1437 This is the main function used to retrieve data from a remote repository.
1425 1438
1426 1439 ``repo`` is the local repository to clone into.
1427 1440 ``remote`` is a peer instance.
1428 1441 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1429 1442 default) means to pull everything from the remote.
1430 1443 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1431 1444 default, all remote bookmarks are pulled.
1432 1445 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1433 1446 initialization.
1434 1447 ``streamclonerequested`` is a boolean indicating whether a "streaming
1435 1448 clone" is requested. A "streaming clone" is essentially a raw file copy
1436 1449 of revlogs from the server. This only works when the local repository is
1437 1450 empty. The default value of ``None`` means to respect the server
1438 1451 configuration for preferring stream clones.
1439 1452
1440 1453 Returns the ``pulloperation`` created for this pull.
1441 1454 """
1442 1455 if opargs is None:
1443 1456 opargs = {}
1444 1457 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1445 1458 streamclonerequested=streamclonerequested,
1446 1459 **pycompat.strkwargs(opargs))
1447 1460
1448 1461 peerlocal = pullop.remote.local()
1449 1462 if peerlocal:
1450 1463 missing = set(peerlocal.requirements) - pullop.repo.supported
1451 1464 if missing:
1452 1465 msg = _("required features are not"
1453 1466 " supported in the destination:"
1454 1467 " %s") % (', '.join(sorted(missing)))
1455 1468 raise error.Abort(msg)
1456 1469
1457 1470 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1458 1471 with repo.wlock(), repo.lock(), pullop.trmanager:
1459 1472 # This should ideally be in _pullbundle2(). However, it needs to run
1460 1473 # before discovery to avoid extra work.
1461 1474 _maybeapplyclonebundle(pullop)
1462 1475 streamclone.maybeperformlegacystreamclone(pullop)
1463 1476 _pulldiscovery(pullop)
1464 1477 if pullop.canusebundle2:
1465 1478 _fullpullbundle2(repo, pullop)
1466 1479 _pullchangeset(pullop)
1467 1480 _pullphase(pullop)
1468 1481 _pullbookmarks(pullop)
1469 1482 _pullobsolete(pullop)
1470 1483
1471 1484 # storing remotenames
1472 1485 if repo.ui.configbool('experimental', 'remotenames'):
1473 1486 logexchange.pullremotenames(repo, remote)
1474 1487
1475 1488 return pullop
1476 1489
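# A minimal, hypothetical usage sketch of pull() (peer construction and
# error handling elided; the 'default' path is an assumption):
#
#   remote = hg.peer(repo, {}, repo.ui.expandpath('default'))
#   pullop = pull(repo, remote, heads=None, bookmarks=['@'])
#   if pullop.cgresult == 0:
#       repo.ui.status('no changesets added\n')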
1477 1490 # list of steps to perform discovery before pull
1478 1491 pulldiscoveryorder = []
1479 1492
1480 1493 # Mapping between step name and function
1481 1494 #
1482 1495 # This exists to help extensions wrap steps if necessary
1483 1496 pulldiscoverymapping = {}
1484 1497
1485 1498 def pulldiscovery(stepname):
1486 1499 """decorator for function performing discovery before pull
1487 1500
1488 1501 The function is added to the step -> function mapping and appended to the
1489 1502 list of steps. Beware that decorated functions will be added in order (this
1490 1503 may matter).
1491 1504
1492 1505 You can only use this decorator for a new step; if you want to wrap a step
1493 1506 from an extension, change the pulldiscoverymapping dictionary directly."""
1494 1507 def dec(func):
1495 1508 assert stepname not in pulldiscoverymapping
1496 1509 pulldiscoverymapping[stepname] = func
1497 1510 pulldiscoveryorder.append(stepname)
1498 1511 return func
1499 1512 return dec
1500 1513
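# Example registration (step name and function are hypothetical; the real
# steps are declared below). Steps run in registration order:
#
#   @pulldiscovery('my-step')
#   def _pulldiscoverymystep(pullop):
#       pass  # inspect or adjust pullop before changesets are pulled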
1501 1514 def _pulldiscovery(pullop):
1502 1515 """Run all discovery steps"""
1503 1516 for stepname in pulldiscoveryorder:
1504 1517 step = pulldiscoverymapping[stepname]
1505 1518 step(pullop)
1506 1519
1507 1520 @pulldiscovery('b1:bookmarks')
1508 1521 def _pullbookmarkbundle1(pullop):
1509 1522 """fetch bookmark data in bundle1 case
1510 1523
1511 1524 If not using bundle2, we have to fetch bookmarks before changeset
1512 1525 discovery to reduce the chance and impact of race conditions."""
1513 1526 if pullop.remotebookmarks is not None:
1514 1527 return
1515 1528 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1516 1529 # all known bundle2 servers now support listkeys, but let's be nice with
1517 1530 # new implementations.
1518 1531 return
1519 1532 books = pullop.remote.listkeys('bookmarks')
1520 1533 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1521 1534
1522 1535
1523 1536 @pulldiscovery('changegroup')
1524 1537 def _pulldiscoverychangegroup(pullop):
1525 1538 """discovery phase for the pull
1526 1539
1527 1540 Currently handles changeset discovery only; it will handle all discovery
1528 1541 at some point."""
1529 1542 tmp = discovery.findcommonincoming(pullop.repo,
1530 1543 pullop.remote,
1531 1544 heads=pullop.heads,
1532 1545 force=pullop.force)
1533 1546 common, fetch, rheads = tmp
1534 1547 nm = pullop.repo.unfiltered().changelog.nodemap
1535 1548 if fetch and rheads:
1536 1549 # If a remote head is filtered locally, put it back in common.
1537 1550 #
1538 1551 # This is a hackish solution to catch most "common but locally
1539 1552 # hidden" situations. We do not perform discovery on the unfiltered
1540 1553 # repository because it ends up doing a pathological number of round
1541 1554 # trips for a huge number of changesets we do not care about.
1542 1555 #
1543 1556 # If a set of such "common but filtered" changesets exists on the server
1544 1557 # but does not include a remote head, we will not be able to detect it.
1545 1558 scommon = set(common)
1546 1559 for n in rheads:
1547 1560 if n in nm:
1548 1561 if n not in scommon:
1549 1562 common.append(n)
1550 1563 if set(rheads).issubset(set(common)):
1551 1564 fetch = []
1552 1565 pullop.common = common
1553 1566 pullop.fetch = fetch
1554 1567 pullop.rheads = rheads
1555 1568
1556 1569 def _pullbundle2(pullop):
1557 1570 """pull data using bundle2
1558 1571
1559 1572 For now, the only supported data is the changegroup."""
1560 1573 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1561 1574
1562 1575 # make ui easier to access
1563 1576 ui = pullop.repo.ui
1564 1577
1565 1578 # At the moment we don't do stream clones over bundle2. If that is
1566 1579 # implemented then here's where the check for that will go.
1567 1580 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1568 1581
1569 1582 # declare the pull perimeter (common and heads)
1570 1583 kwargs['common'] = pullop.common
1571 1584 kwargs['heads'] = pullop.heads or pullop.rheads
1572 1585
1573 1586 if streaming:
1574 1587 kwargs['cg'] = False
1575 1588 kwargs['stream'] = True
1576 1589 pullop.stepsdone.add('changegroup')
1577 1590 pullop.stepsdone.add('phases')
1578 1591
1579 1592 else:
1580 1593 # pulling changegroup
1581 1594 pullop.stepsdone.add('changegroup')
1582 1595
1583 1596 kwargs['cg'] = pullop.fetch
1584 1597
1585 1598 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1586 1599 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1587 1600 if (not legacyphase and hasbinaryphase):
1588 1601 kwargs['phases'] = True
1589 1602 pullop.stepsdone.add('phases')
1590 1603
1591 1604 if 'listkeys' in pullop.remotebundle2caps:
1592 1605 if 'phases' not in pullop.stepsdone:
1593 1606 kwargs['listkeys'] = ['phases']
1594 1607
1595 1608 bookmarksrequested = False
1596 1609 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1597 1610 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1598 1611
1599 1612 if pullop.remotebookmarks is not None:
1600 1613 pullop.stepsdone.add('request-bookmarks')
1601 1614
1602 1615 if ('request-bookmarks' not in pullop.stepsdone
1603 1616 and pullop.remotebookmarks is None
1604 1617 and not legacybookmark and hasbinarybook):
1605 1618 kwargs['bookmarks'] = True
1606 1619 bookmarksrequested = True
1607 1620
1608 1621 if 'listkeys' in pullop.remotebundle2caps:
1609 1622 if 'request-bookmarks' not in pullop.stepsdone:
1610 1623 # make sure to always include bookmark data when migrating
1611 1624 # `hg incoming --bundle` to using this function.
1612 1625 pullop.stepsdone.add('request-bookmarks')
1613 1626 kwargs.setdefault('listkeys', []).append('bookmarks')
1614 1627
1615 1628 # If this is a full pull / clone and the server supports the clone bundles
1616 1629 # feature, tell the server whether we attempted a clone bundle. The
1617 1630 # presence of this flag indicates the client supports clone bundles. This
1618 1631 # will enable the server to treat clients that support clone bundles
1619 1632 # differently from those that don't.
1620 1633 if (pullop.remote.capable('clonebundles')
1621 1634 and pullop.heads is None and list(pullop.common) == [nullid]):
1622 1635 kwargs['cbattempted'] = pullop.clonebundleattempted
1623 1636
1624 1637 if streaming:
1625 1638 pullop.repo.ui.status(_('streaming all changes\n'))
1626 1639 elif not pullop.fetch:
1627 1640 pullop.repo.ui.status(_("no changes found\n"))
1628 1641 pullop.cgresult = 0
1629 1642 else:
1630 1643 if pullop.heads is None and list(pullop.common) == [nullid]:
1631 1644 pullop.repo.ui.status(_("requesting all changes\n"))
1632 1645 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1633 1646 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1634 1647 if obsolete.commonversion(remoteversions) is not None:
1635 1648 kwargs['obsmarkers'] = True
1636 1649 pullop.stepsdone.add('obsmarkers')
1637 1650 _pullbundle2extraprepare(pullop, kwargs)
1638 1651 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1639 1652 try:
1640 1653 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
1641 1654 source='pull')
1642 1655 op.modes['bookmarks'] = 'records'
1643 1656 bundle2.processbundle(pullop.repo, bundle, op=op)
1644 1657 except bundle2.AbortFromPart as exc:
1645 1658 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1646 1659 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1647 1660 except error.BundleValueError as exc:
1648 1661 raise error.Abort(_('missing support for %s') % exc)
1649 1662
1650 1663 if pullop.fetch:
1651 1664 pullop.cgresult = bundle2.combinechangegroupresults(op)
1652 1665
1653 1666 # processing phases change
1654 1667 for namespace, value in op.records['listkeys']:
1655 1668 if namespace == 'phases':
1656 1669 _pullapplyphases(pullop, value)
1657 1670
1658 1671 # processing bookmark update
1659 1672 if bookmarksrequested:
1660 1673 books = {}
1661 1674 for record in op.records['bookmarks']:
1662 1675 books[record['bookmark']] = record["node"]
1663 1676 pullop.remotebookmarks = books
1664 1677 else:
1665 1678 for namespace, value in op.records['listkeys']:
1666 1679 if namespace == 'bookmarks':
1667 1680 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1668 1681
1669 1682 # bookmark data were either already there or pulled in the bundle
1670 1683 if pullop.remotebookmarks is not None:
1671 1684 _pullbookmarks(pullop)
1672 1685
1673 1686 def _pullbundle2extraprepare(pullop, kwargs):
1674 1687 """hook function so that extensions can extend the getbundle call"""
1675 1688
1676 1689 def _pullchangeset(pullop):
1677 1690 """pull changeset from unbundle into the local repo"""
1678 1691 # We delay opening the transaction as long as possible so we
1679 1692 # don't open a transaction for nothing and don't break a future,
1680 1693 # useful rollback call
1681 1694 if 'changegroup' in pullop.stepsdone:
1682 1695 return
1683 1696 pullop.stepsdone.add('changegroup')
1684 1697 if not pullop.fetch:
1685 1698 pullop.repo.ui.status(_("no changes found\n"))
1686 1699 pullop.cgresult = 0
1687 1700 return
1688 1701 tr = pullop.gettransaction()
1689 1702 if pullop.heads is None and list(pullop.common) == [nullid]:
1690 1703 pullop.repo.ui.status(_("requesting all changes\n"))
1691 1704 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1692 1705 # issue1320, avoid a race if remote changed after discovery
1693 1706 pullop.heads = pullop.rheads
1694 1707
1695 1708 if pullop.remote.capable('getbundle'):
1696 1709 # TODO: get bundlecaps from remote
1697 1710 cg = pullop.remote.getbundle('pull', common=pullop.common,
1698 1711 heads=pullop.heads or pullop.rheads)
1699 1712 elif pullop.heads is None:
1700 1713 with pullop.remote.commandexecutor() as e:
1701 1714 cg = e.callcommand('changegroup', {
1702 1715 'nodes': pullop.fetch,
1703 1716 'source': 'pull',
1704 1717 }).result()
1705 1718
1706 1719 elif not pullop.remote.capable('changegroupsubset'):
1707 1720 raise error.Abort(_("partial pull cannot be done because "
1708 1721 "other repository doesn't support "
1709 1722 "changegroupsubset."))
1710 1723 else:
1711 1724 with pullop.remote.commandexecutor() as e:
1712 1725 cg = e.callcommand('changegroupsubset', {
1713 1726 'bases': pullop.fetch,
1714 1727 'heads': pullop.heads,
1715 1728 'source': 'pull',
1716 1729 }).result()
1717 1730
1718 1731 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1719 1732 pullop.remote.url())
1720 1733 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1721 1734
1722 1735 def _pullphase(pullop):
1723 1736 # Get remote phases data from remote
1724 1737 if 'phases' in pullop.stepsdone:
1725 1738 return
1726 1739 remotephases = pullop.remote.listkeys('phases')
1727 1740 _pullapplyphases(pullop, remotephases)
1728 1741
1729 1742 def _pullapplyphases(pullop, remotephases):
1730 1743 """apply phase movement from observed remote state"""
1731 1744 if 'phases' in pullop.stepsdone:
1732 1745 return
1733 1746 pullop.stepsdone.add('phases')
1734 1747 publishing = bool(remotephases.get('publishing', False))
1735 1748 if remotephases and not publishing:
1736 1749 # remote is new and non-publishing
1737 1750 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1738 1751 pullop.pulledsubset,
1739 1752 remotephases)
1740 1753 dheads = pullop.pulledsubset
1741 1754 else:
1742 1755 # Remote is old or publishing; all common changesets
1743 1756 # should be seen as public
1744 1757 pheads = pullop.pulledsubset
1745 1758 dheads = []
1746 1759 unfi = pullop.repo.unfiltered()
1747 1760 phase = unfi._phasecache.phase
1748 1761 rev = unfi.changelog.nodemap.get
1749 1762 public = phases.public
1750 1763 draft = phases.draft
1751 1764
1752 1765 # exclude changesets already public locally and update the others
1753 1766 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1754 1767 if pheads:
1755 1768 tr = pullop.gettransaction()
1756 1769 phases.advanceboundary(pullop.repo, tr, public, pheads)
1757 1770
1758 1771 # exclude changesets already draft locally and update the others
1759 1772 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1760 1773 if dheads:
1761 1774 tr = pullop.gettransaction()
1762 1775 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1763 1776
1764 1777 def _pullbookmarks(pullop):
1765 1778 """process the remote bookmark information to update the local one"""
1766 1779 if 'bookmarks' in pullop.stepsdone:
1767 1780 return
1768 1781 pullop.stepsdone.add('bookmarks')
1769 1782 repo = pullop.repo
1770 1783 remotebookmarks = pullop.remotebookmarks
1771 1784 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1772 1785 pullop.remote.url(),
1773 1786 pullop.gettransaction,
1774 1787 explicit=pullop.explicitbookmarks)
1775 1788
1776 1789 def _pullobsolete(pullop):
1777 1790 """utility function to pull obsolete markers from a remote
1778 1791
1779 1792 `gettransaction` is a function that returns the pull transaction, creating
1780 1793 one if necessary. We return the transaction to inform the calling code that
1781 1794 a new transaction has been created (when applicable).
1782 1795
1783 1796 Exists mostly to allow overriding for experimentation purposes"""
1784 1797 if 'obsmarkers' in pullop.stepsdone:
1785 1798 return
1786 1799 pullop.stepsdone.add('obsmarkers')
1787 1800 tr = None
1788 1801 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1789 1802 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1790 1803 remoteobs = pullop.remote.listkeys('obsolete')
1791 1804 if 'dump0' in remoteobs:
1792 1805 tr = pullop.gettransaction()
1793 1806 markers = []
1794 1807 for key in sorted(remoteobs, reverse=True):
1795 1808 if key.startswith('dump'):
1796 1809 data = util.b85decode(remoteobs[key])
1797 1810 version, newmarks = obsolete._readmarkers(data)
1798 1811 markers += newmarks
1799 1812 if markers:
1800 1813 pullop.repo.obsstore.add(tr, markers)
1801 1814 pullop.repo.invalidatevolatilesets()
1802 1815 return tr
1803 1816
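# Shape of the data consumed above, as a sketch: listkeys('obsolete')
# returns a dict whose 'dump0', 'dump1', ... values hold base85-encoded
# binary marker blobs, conceptually:
#
#   {'dump0': util.b85encode(rawmarkers)}
#
# where 'rawmarkers' is a placeholder for the encoded markers payload.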
1804 1817 def caps20to10(repo, role):
1805 1818 """return a set with appropriate options to use bundle20 during getbundle"""
1806 1819 caps = {'HG20'}
1807 1820 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
1808 1821 caps.add('bundle2=' + urlreq.quote(capsblob))
1809 1822 return caps
1810 1823
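# Rough shape of the returned set (the percent-encoded blob is abbreviated
# here and depends on the repo's actual bundle2 capabilities):
#
#   {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'}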
1811 1824 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1812 1825 getbundle2partsorder = []
1813 1826
1814 1827 # Mapping between step name and function
1815 1828 #
1816 1829 # This exists to help extensions wrap steps if necessary
1817 1830 getbundle2partsmapping = {}
1818 1831
1819 1832 def getbundle2partsgenerator(stepname, idx=None):
1820 1833 """decorator for function generating bundle2 part for getbundle
1821 1834
1822 1835 The function is added to the step -> function mapping and appended to the
1823 1836 list of steps. Beware that decorated functions will be added in order
1824 1837 (this may matter).
1825 1838
1826 1839 You can only use this decorator for new steps; if you want to wrap a step
1827 1840 from an extension, modify the getbundle2partsmapping dictionary directly."""
1828 1841 def dec(func):
1829 1842 assert stepname not in getbundle2partsmapping
1830 1843 getbundle2partsmapping[stepname] = func
1831 1844 if idx is None:
1832 1845 getbundle2partsorder.append(stepname)
1833 1846 else:
1834 1847 getbundle2partsorder.insert(idx, stepname)
1835 1848 return func
1836 1849 return dec
1837 1850
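# Example registration (part name and function are hypothetical): passing
# idx=0 would emit this part before all previously registered ones:
#
#   @getbundle2partsgenerator('my-part', idx=0)
#   def _getbundlemypart(bundler, repo, source, bundlecaps=None,
#                        b2caps=None, **kwargs):
#       bundler.newpart('my-part', data='payload')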
1838 1851 def bundle2requested(bundlecaps):
1839 1852 if bundlecaps is not None:
1840 1853 return any(cap.startswith('HG2') for cap in bundlecaps)
1841 1854 return False
1842 1855
1843 1856 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1844 1857 **kwargs):
1845 1858 """Return chunks constituting a bundle's raw data.
1846 1859
1847 1860 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1848 1861 passed.
1849 1862
1850 1863 Returns a 2-tuple of a dict with metadata about the generated bundle
1851 1864 and an iterator over raw chunks (of varying sizes).
1852 1865 """
1853 1866 kwargs = pycompat.byteskwargs(kwargs)
1854 1867 info = {}
1855 1868 usebundle2 = bundle2requested(bundlecaps)
1856 1869 # bundle10 case
1857 1870 if not usebundle2:
1858 1871 if bundlecaps and not kwargs.get('cg', True):
1859 1872 raise ValueError(_('request for bundle10 must include changegroup'))
1860 1873
1861 1874 if kwargs:
1862 1875 raise ValueError(_('unsupported getbundle arguments: %s')
1863 1876 % ', '.join(sorted(kwargs.keys())))
1864 1877 outgoing = _computeoutgoing(repo, heads, common)
1865 1878 info['bundleversion'] = 1
1866 1879 return info, changegroup.makestream(repo, outgoing, '01', source,
1867 1880 bundlecaps=bundlecaps)
1868 1881
1869 1882 # bundle20 case
1870 1883 info['bundleversion'] = 2
1871 1884 b2caps = {}
1872 1885 for bcaps in bundlecaps:
1873 1886 if bcaps.startswith('bundle2='):
1874 1887 blob = urlreq.unquote(bcaps[len('bundle2='):])
1875 1888 b2caps.update(bundle2.decodecaps(blob))
1876 1889 bundler = bundle2.bundle20(repo.ui, b2caps)
1877 1890
1878 1891 kwargs['heads'] = heads
1879 1892 kwargs['common'] = common
1880 1893
1881 1894 for name in getbundle2partsorder:
1882 1895 func = getbundle2partsmapping[name]
1883 1896 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1884 1897 **pycompat.strkwargs(kwargs))
1885 1898
1886 1899 info['prefercompressed'] = bundler.prefercompressed
1887 1900
1888 1901 return info, bundler.getchunks()
1889 1902
1890 1903 @getbundle2partsgenerator('stream2')
1891 1904 def _getbundlestream2(bundler, repo, *args, **kwargs):
1892 1905 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
1893 1906
1894 1907 @getbundle2partsgenerator('changegroup')
1895 1908 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1896 1909 b2caps=None, heads=None, common=None, **kwargs):
1897 1910 """add a changegroup part to the requested bundle"""
1898 1911 cgstream = None
1899 1912 if kwargs.get(r'cg', True):
1900 1913 # build changegroup bundle here.
1901 1914 version = '01'
1902 1915 cgversions = b2caps.get('changegroup')
1903 1916 if cgversions: # 3.1 and 3.2 ship with an empty value
1904 1917 cgversions = [v for v in cgversions
1905 1918 if v in changegroup.supportedoutgoingversions(repo)]
1906 1919 if not cgversions:
1907 1920 raise ValueError(_('no common changegroup version'))
1908 1921 version = max(cgversions)
1909 1922 outgoing = _computeoutgoing(repo, heads, common)
1910 1923 if outgoing.missing:
1911 1924 cgstream = changegroup.makestream(repo, outgoing, version, source,
1912 1925 bundlecaps=bundlecaps)
1913 1926
1914 1927 if cgstream:
1915 1928 part = bundler.newpart('changegroup', data=cgstream)
1916 1929 if cgversions:
1917 1930 part.addparam('version', version)
1918 1931 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1919 1932 mandatory=False)
1920 1933 if 'treemanifest' in repo.requirements:
1921 1934 part.addparam('treemanifest', '1')
1922 1935
1923 1936 @getbundle2partsgenerator('bookmarks')
1924 1937 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1925 1938 b2caps=None, **kwargs):
1926 1939 """add a bookmark part to the requested bundle"""
1927 1940 if not kwargs.get(r'bookmarks', False):
1928 1941 return
1929 1942 if 'bookmarks' not in b2caps:
1930 1943 raise ValueError(_('no common bookmarks exchange method'))
1931 1944 books = bookmod.listbinbookmarks(repo)
1932 1945 data = bookmod.binaryencode(books)
1933 1946 if data:
1934 1947 bundler.newpart('bookmarks', data=data)
1935 1948
1936 1949 @getbundle2partsgenerator('listkeys')
1937 1950 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1938 1951 b2caps=None, **kwargs):
1939 1952 """add parts containing listkeys namespaces to the requested bundle"""
1940 1953 listkeys = kwargs.get(r'listkeys', ())
1941 1954 for namespace in listkeys:
1942 1955 part = bundler.newpart('listkeys')
1943 1956 part.addparam('namespace', namespace)
1944 1957 keys = repo.listkeys(namespace).items()
1945 1958 part.data = pushkey.encodekeys(keys)
1946 1959
1947 1960 @getbundle2partsgenerator('obsmarkers')
1948 1961 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1949 1962 b2caps=None, heads=None, **kwargs):
1950 1963 """add an obsolescence markers part to the requested bundle"""
1951 1964 if kwargs.get(r'obsmarkers', False):
1952 1965 if heads is None:
1953 1966 heads = repo.heads()
1954 1967 subset = [c.node() for c in repo.set('::%ln', heads)]
1955 1968 markers = repo.obsstore.relevantmarkers(subset)
1956 1969 markers = sorted(markers)
1957 1970 bundle2.buildobsmarkerspart(bundler, markers)
1958 1971
1959 1972 @getbundle2partsgenerator('phases')
1960 1973 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1961 1974 b2caps=None, heads=None, **kwargs):
1962 1975 """add phase heads part to the requested bundle"""
1963 1976 if kwargs.get(r'phases', False):
1964 1977 if 'heads' not in b2caps.get('phases', ()):
1965 1978 raise ValueError(_('no common phases exchange method'))
1966 1979 if heads is None:
1967 1980 heads = repo.heads()
1968 1981
1969 1982 headsbyphase = collections.defaultdict(set)
1970 1983 if repo.publishing():
1971 1984 headsbyphase[phases.public] = heads
1972 1985 else:
1973 1986 # find the appropriate heads to move
1974 1987
1975 1988 phase = repo._phasecache.phase
1976 1989 node = repo.changelog.node
1977 1990 rev = repo.changelog.rev
1978 1991 for h in heads:
1979 1992 headsbyphase[phase(repo, rev(h))].add(h)
1980 1993 seenphases = list(headsbyphase.keys())
1981 1994
1982 1995 # We do not handle anything but public and draft phases for now
1983 1996 if seenphases:
1984 1997 assert max(seenphases) <= phases.draft
1985 1998
1986 1999 # if client is pulling non-public changesets, we need to find
1987 2000 # intermediate public heads.
1988 2001 draftheads = headsbyphase.get(phases.draft, set())
1989 2002 if draftheads:
1990 2003 publicheads = headsbyphase.get(phases.public, set())
1991 2004
1992 2005 revset = 'heads(only(%ln, %ln) and public())'
1993 2006 extraheads = repo.revs(revset, draftheads, publicheads)
1994 2007 for r in extraheads:
1995 2008 headsbyphase[phases.public].add(node(r))
1996 2009
1997 2010 # transform data in a format used by the encoding function
1998 2011 phasemapping = []
1999 2012 for phase in phases.allphases:
2000 2013 phasemapping.append(sorted(headsbyphase[phase]))
2001 2014
2002 2015 # generate the actual part
2003 2016 phasedata = phases.binaryencode(phasemapping)
2004 2017 bundler.newpart('phase-heads', data=phasedata)
2005 2018
2006 2019 @getbundle2partsgenerator('hgtagsfnodes')
2007 2020 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
2008 2021 b2caps=None, heads=None, common=None,
2009 2022 **kwargs):
2010 2023 """Transfer the .hgtags filenodes mapping.
2011 2024
2012 2025 Only values for heads in this bundle will be transferred.
2013 2026
2014 2027 The part data consists of pairs of 20 byte changeset node and .hgtags
2015 2028 filenodes raw values.
2016 2029 """
2017 2030 # Don't send unless:
2018 2031 # - changesets are being exchanged,
2019 2032 # - the client supports it.
2020 2033 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
2021 2034 return
2022 2035
2023 2036 outgoing = _computeoutgoing(repo, heads, common)
2024 2037 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2025 2038
2026 2039 @getbundle2partsgenerator('cache:rev-branch-cache')
2027 2040 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
2028 2041 b2caps=None, heads=None, common=None,
2029 2042 **kwargs):
2030 2043 """Transfer the rev-branch-cache mapping
2031 2044
2032 2045 The payload is a series of records, one per branch:
2033 2046
2034 2047 1) branch name length
2035 2048 2) number of open heads
2036 2049 3) number of closed heads
2037 2050 4) open heads nodes
2038 2051 5) closed heads nodes
2039 2052 """
2040 2053 # Don't send unless:
2041 2054 # - changesets are being exchanged,
2042 2055 # - the client supports it.
2043 2056 if not kwargs.get(r'cg', True) or 'rev-branch-cache' not in b2caps:
2044 2057 return
2045 2058 outgoing = _computeoutgoing(repo, heads, common)
2046 2059 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2047 2060
2048 2061 def check_heads(repo, their_heads, context):
2049 2062 """check if the heads of a repo have been modified
2050 2063
2051 2064 Used by peer for unbundling.
2052 2065 """
2053 2066 heads = repo.heads()
2054 2067 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
2055 2068 if not (their_heads == ['force'] or their_heads == heads or
2056 2069 their_heads == ['hashed', heads_hash]):
2057 2070 # someone else committed/pushed/unbundled while we
2058 2071 # were transferring data
2059 2072 raise error.PushRaced('repository changed while %s - '
2060 2073 'please try again' % context)
2061 2074
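# Sketch of how a client builds the 'hashed' form accepted above: it
# hashes the sorted, concatenated binary heads it observed during
# discovery ('observedheads' is a placeholder name):
#
#   their_heads = ['hashed',
#                  hashlib.sha1(''.join(sorted(observedheads))).digest()]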
2062 2075 def unbundle(repo, cg, heads, source, url):
2063 2076 """Apply a bundle to a repo.
2064 2077
2065 2078 This function makes sure the repo is locked during the application and has
2066 2079 a mechanism to check that no push race occurred between the creation of the
2067 2080 bundle and its application.
2068 2081
2069 2082 If the push was raced, a PushRaced exception is raised."""
2070 2083 r = 0
2071 2084 # need a transaction when processing a bundle2 stream
2072 2085 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2073 2086 lockandtr = [None, None, None]
2074 2087 recordout = None
2075 2088 # quick fix for output mismatch with bundle2 in 3.4
2076 2089 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
2077 2090 if url.startswith('remote:http:') or url.startswith('remote:https:'):
2078 2091 captureoutput = True
2079 2092 try:
2080 2093 # note: outside bundle1, 'heads' is expected to be empty and this
2081 2094 # 'check_heads' call will be a no-op
2082 2095 check_heads(repo, heads, 'uploading changes')
2083 2096 # push can proceed
2084 2097 if not isinstance(cg, bundle2.unbundle20):
2085 2098 # legacy case: bundle1 (changegroup 01)
2086 2099 txnname = "\n".join([source, util.hidepassword(url)])
2087 2100 with repo.lock(), repo.transaction(txnname) as tr:
2088 2101 op = bundle2.applybundle(repo, cg, tr, source, url)
2089 2102 r = bundle2.combinechangegroupresults(op)
2090 2103 else:
2091 2104 r = None
2092 2105 try:
2093 2106 def gettransaction():
2094 2107 if not lockandtr[2]:
2095 2108 lockandtr[0] = repo.wlock()
2096 2109 lockandtr[1] = repo.lock()
2097 2110 lockandtr[2] = repo.transaction(source)
2098 2111 lockandtr[2].hookargs['source'] = source
2099 2112 lockandtr[2].hookargs['url'] = url
2100 2113 lockandtr[2].hookargs['bundle2'] = '1'
2101 2114 return lockandtr[2]
2102 2115
2103 2116 # Do greedy locking by default until we're satisfied with lazy
2104 2117 # locking.
2105 2118 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2106 2119 gettransaction()
2107 2120
2108 2121 op = bundle2.bundleoperation(repo, gettransaction,
2109 2122 captureoutput=captureoutput,
2110 2123 source='push')
2111 2124 try:
2112 2125 op = bundle2.processbundle(repo, cg, op=op)
2113 2126 finally:
2114 2127 r = op.reply
2115 2128 if captureoutput and r is not None:
2116 2129 repo.ui.pushbuffer(error=True, subproc=True)
2117 2130 def recordout(output):
2118 2131 r.newpart('output', data=output, mandatory=False)
2119 2132 if lockandtr[2] is not None:
2120 2133 lockandtr[2].close()
2121 2134 except BaseException as exc:
2122 2135 exc.duringunbundle2 = True
2123 2136 if captureoutput and r is not None:
2124 2137 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2125 2138 def recordout(output):
2126 2139 part = bundle2.bundlepart('output', data=output,
2127 2140 mandatory=False)
2128 2141 parts.append(part)
2129 2142 raise
2130 2143 finally:
2131 2144 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2132 2145 if recordout is not None:
2133 2146 recordout(repo.ui.popbuffer())
2134 2147 return r
2135 2148
2136 2149 def _maybeapplyclonebundle(pullop):
2137 2150 """Apply a clone bundle from a remote, if possible."""
2138 2151
2139 2152 repo = pullop.repo
2140 2153 remote = pullop.remote
2141 2154
2142 2155 if not repo.ui.configbool('ui', 'clonebundles'):
2143 2156 return
2144 2157
2145 2158 # Only run if local repo is empty.
2146 2159 if len(repo):
2147 2160 return
2148 2161
2149 2162 if pullop.heads:
2150 2163 return
2151 2164
2152 2165 if not remote.capable('clonebundles'):
2153 2166 return
2154 2167
2155 2168 res = remote._call('clonebundles')
2156 2169
2157 2170 # If we call the wire protocol command, that's good enough to record the
2158 2171 # attempt.
2159 2172 pullop.clonebundleattempted = True
2160 2173
2161 2174 entries = parseclonebundlesmanifest(repo, res)
2162 2175 if not entries:
2163 2176 repo.ui.note(_('no clone bundles available on remote; '
2164 2177 'falling back to regular clone\n'))
2165 2178 return
2166 2179
2167 2180 entries = filterclonebundleentries(
2168 2181 repo, entries, streamclonerequested=pullop.streamclonerequested)
2169 2182
2170 2183 if not entries:
2171 2184 # There is a thundering herd concern here. However, if a server
2172 2185 # operator doesn't advertise bundles appropriate for its clients,
2173 2186 # they deserve what's coming. Furthermore, from a client's
2174 2187 # perspective, no automatic fallback would mean not being able to
2175 2188 # clone!
2176 2189 repo.ui.warn(_('no compatible clone bundles available on server; '
2177 2190 'falling back to regular clone\n'))
2178 2191 repo.ui.warn(_('(you may want to report this to the server '
2179 2192 'operator)\n'))
2180 2193 return
2181 2194
2182 2195 entries = sortclonebundleentries(repo.ui, entries)
2183 2196
2184 2197 url = entries[0]['URL']
2185 2198 repo.ui.status(_('applying clone bundle from %s\n') % url)
2186 2199 if trypullbundlefromurl(repo.ui, repo, url):
2187 2200 repo.ui.status(_('finished applying clone bundle\n'))
2188 2201 # Bundle failed.
2189 2202 #
2190 2203 # We abort by default to avoid the thundering herd of
2191 2204 # clients flooding a server that was expecting expensive
2192 2205 # clone load to be offloaded.
2193 2206 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2194 2207 repo.ui.warn(_('falling back to normal clone\n'))
2195 2208 else:
2196 2209 raise error.Abort(_('error applying bundle'),
2197 2210 hint=_('if this error persists, consider contacting '
2198 2211 'the server operator or disable clone '
2199 2212 'bundles via '
2200 2213 '"--config ui.clonebundles=false"'))
2201 2214
2202 2215 def parseclonebundlesmanifest(repo, s):
2203 2216 """Parses the raw text of a clone bundles manifest.
2204 2217
2205 2218 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2206 2219 to the URL; the other keys are the attributes for the entry.
2207 2220 """
2208 2221 m = []
2209 2222 for line in s.splitlines():
2210 2223 fields = line.split()
2211 2224 if not fields:
2212 2225 continue
2213 2226 attrs = {'URL': fields[0]}
2214 2227 for rawattr in fields[1:]:
2215 2228 key, value = rawattr.split('=', 1)
2216 2229 key = urlreq.unquote(key)
2217 2230 value = urlreq.unquote(value)
2218 2231 attrs[key] = value
2219 2232
2220 2233 # Parse BUNDLESPEC into components. This makes client-side
2221 2234 # preferences easier to specify since you can prefer a single
2222 2235 # component of the BUNDLESPEC.
2223 2236 if key == 'BUNDLESPEC':
2224 2237 try:
2225 2238 bundlespec = parsebundlespec(repo, value,
2226 2239 externalnames=True)
2227 2240 attrs['COMPRESSION'] = bundlespec.compression
2228 2241 attrs['VERSION'] = bundlespec.version
2229 2242 except error.InvalidBundleSpecification:
2230 2243 pass
2231 2244 except error.UnsupportedBundleSpecification:
2232 2245 pass
2233 2246
2234 2247 m.append(attrs)
2235 2248
2236 2249 return m
2237 2250
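# A hypothetical manifest this parser accepts: one entry per line, the URL
# first, followed by whitespace-separated, percent-encoded KEY=VALUE
# attributes:
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2
#   https://example.com/stream.hg BUNDLESPEC=none-packed1 REQUIRESNI=true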
2238 2251 def isstreamclonespec(bundlespec):
2239 2252 # Stream clone v1
2240 2253 if (bundlespec.compression == 'UN' and bundlespec.version == 's1'):
2241 2254 return True
2242 2255
2243 2256 # Stream clone v2
2244 2257 if (bundlespec.compression == 'UN' and bundlespec.version == '02' and
2245 2258 bundlespec.contentopts.get('streamv2')):
2246 2259 return True
2247 2260
2248 2261 return False
2249 2262
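# For illustration (spec strings are examples, not an exhaustive list): a
# spec parsed from 'none-packed1' yields compression 'UN' and version 's1'
# (stream clone v1), while a 'none-v2' spec carrying the 'streamv2'
# content option is stream clone v2; everything else returns False.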
2250 2263 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2251 2264 """Remove incompatible clone bundle manifest entries.
2252 2265
2253 2266 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2254 2267 and returns a new list consisting of only the entries that this client
2255 2268 should be able to apply.
2256 2269
2257 2270 There is no guarantee we'll be able to apply all returned entries because
2258 2271 the metadata we use to filter on may be missing or wrong.
2259 2272 """
2260 2273 newentries = []
2261 2274 for entry in entries:
2262 2275 spec = entry.get('BUNDLESPEC')
2263 2276 if spec:
2264 2277 try:
2265 2278 bundlespec = parsebundlespec(repo, spec, strict=True)
2266 2279
2267 2280 # If a stream clone was requested, filter out non-streamclone
2268 2281 # entries.
2269 2282 if streamclonerequested and not isstreamclonespec(bundlespec):
2270 2283 repo.ui.debug('filtering %s because not a stream clone\n' %
2271 2284 entry['URL'])
2272 2285 continue
2273 2286
2274 2287 except error.InvalidBundleSpecification as e:
2275 2288 repo.ui.debug(str(e) + '\n')
2276 2289 continue
2277 2290 except error.UnsupportedBundleSpecification as e:
2278 2291 repo.ui.debug('filtering %s because unsupported bundle '
2279 2292 'spec: %s\n' % (
2280 2293 entry['URL'], stringutil.forcebytestr(e)))
2281 2294 continue
2282 2295 # If we don't have a spec and requested a stream clone, we don't know
2283 2296 # what the entry is so don't attempt to apply it.
2284 2297 elif streamclonerequested:
2285 2298 repo.ui.debug('filtering %s because cannot determine if a stream '
2286 2299 'clone bundle\n' % entry['URL'])
2287 2300 continue
2288 2301
2289 2302 if 'REQUIRESNI' in entry and not sslutil.hassni:
2290 2303 repo.ui.debug('filtering %s because SNI not supported\n' %
2291 2304 entry['URL'])
2292 2305 continue
2293 2306
2294 2307 newentries.append(entry)
2295 2308
2296 2309 return newentries
2297 2310
2298 2311 class clonebundleentry(object):
2299 2312 """Represents an item in a clone bundles manifest.
2300 2313
2301 2314 This rich class is needed to support sorting since sorted() in Python 3
2302 2315 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2303 2316 won't work.
2304 2317 """
2305 2318
2306 2319 def __init__(self, value, prefers):
2307 2320 self.value = value
2308 2321 self.prefers = prefers
2309 2322
2310 2323 def _cmp(self, other):
2311 2324 for prefkey, prefvalue in self.prefers:
2312 2325 avalue = self.value.get(prefkey)
2313 2326 bvalue = other.value.get(prefkey)
2314 2327
2315 2328 # Special case for b missing the attribute and a matching exactly.
2316 2329 if avalue is not None and bvalue is None and avalue == prefvalue:
2317 2330 return -1
2318 2331
2319 2332 # Special case for a missing the attribute and b matching exactly.
2320 2333 if bvalue is not None and avalue is None and bvalue == prefvalue:
2321 2334 return 1
2322 2335
2323 2336 # We can't compare unless attribute present on both.
2324 2337 if avalue is None or bvalue is None:
2325 2338 continue
2326 2339
2327 2340 # Same values should fall back to next attribute.
2328 2341 if avalue == bvalue:
2329 2342 continue
2330 2343
2331 2344 # Exact matches come first.
2332 2345 if avalue == prefvalue:
2333 2346 return -1
2334 2347 if bvalue == prefvalue:
2335 2348 return 1
2336 2349
2337 2350 # Fall back to next attribute.
2338 2351 continue
2339 2352
2340 2353 # If we got here we couldn't sort by attributes and prefers. Fall
2341 2354 # back to index order.
2342 2355 return 0
2343 2356
2344 2357 def __lt__(self, other):
2345 2358 return self._cmp(other) < 0
2346 2359
2347 2360 def __gt__(self, other):
2348 2361 return self._cmp(other) > 0
2349 2362
2350 2363 def __eq__(self, other):
2351 2364 return self._cmp(other) == 0
2352 2365
2353 2366 def __le__(self, other):
2354 2367 return self._cmp(other) <= 0
2355 2368
2356 2369 def __ge__(self, other):
2357 2370 return self._cmp(other) >= 0
2358 2371
2359 2372 def __ne__(self, other):
2360 2373 return self._cmp(other) != 0
2361 2374
2362 2375 def sortclonebundleentries(ui, entries):
2363 2376 prefers = ui.configlist('ui', 'clonebundleprefers')
2364 2377 if not prefers:
2365 2378 return list(entries)
2366 2379
2367 2380 prefers = [p.split('=', 1) for p in prefers]
2368 2381
2369 2382 items = sorted(clonebundleentry(v, prefers) for v in entries)
2370 2383 return [i.value for i in items]
2371 2384
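# Hypothetical configuration driving this sort:
#
#   [ui]
#   clonebundleprefers = COMPRESSION=zstd, COMPRESSION=gzip
#
# Entries whose COMPRESSION attribute is 'zstd' sort first, then 'gzip';
# remaining ties keep manifest order (the _cmp() fallback returns 0).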
2372 2385 def trypullbundlefromurl(ui, repo, url):
2373 2386 """Attempt to apply a bundle from a URL."""
2374 2387 with repo.lock(), repo.transaction('bundleurl') as tr:
2375 2388 try:
2376 2389 fh = urlmod.open(ui, url)
2377 2390 cg = readbundle(ui, fh, 'stream')
2378 2391
2379 2392 if isinstance(cg, streamclone.streamcloneapplier):
2380 2393 cg.apply(repo)
2381 2394 else:
2382 2395 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2383 2396 return True
2384 2397 except urlerr.httperror as e:
2385 2398 ui.warn(_('HTTP error fetching bundle: %s\n') %
2386 2399 stringutil.forcebytestr(e))
2387 2400 except urlerr.urlerror as e:
2388 2401 ui.warn(_('error fetching bundle: %s\n') %
2389 2402 stringutil.forcebytestr(e.reason))
2390 2403
2391 2404 return False